1 /*
2  *  linux/fs/ext4/inode.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  from
10  *
11  *  linux/fs/minix/inode.c
12  *
13  *  Copyright (C) 1991, 1992  Linus Torvalds
14  *
15  *  Goal-directed block allocation by Stephen Tweedie
16  *      (sct@redhat.com), 1993, 1998
17  *  Big-endian to little-endian byte-swapping/bitmaps by
18  *        David S. Miller (davem@caip.rutgers.edu), 1995
19  *  64-bit file support on 64-bit platforms by Jakub Jelinek
20  *      (jj@sunsite.ms.mff.cuni.cz)
21  *
22  *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
23  */
24
25 #include <linux/module.h>
26 #include <linux/fs.h>
27 #include <linux/time.h>
28 #include <linux/jbd2.h>
29 #include <linux/highuid.h>
30 #include <linux/pagemap.h>
31 #include <linux/quotaops.h>
32 #include <linux/string.h>
33 #include <linux/buffer_head.h>
34 #include <linux/writeback.h>
35 #include <linux/mpage.h>
36 #include <linux/uio.h>
37 #include <linux/bio.h>
38 #include "ext4_jbd2.h"
39 #include "xattr.h"
40 #include "acl.h"
41
42 static inline int ext4_begin_ordered_truncate(struct inode *inode,
43                                               loff_t new_size)
44 {
45         return jbd2_journal_begin_ordered_truncate(&EXT4_I(inode)->jinode,
46                                                    new_size);
47 }
48
49 /*
50  * Test whether an inode is a fast symlink.
51  */
52 static int ext4_inode_is_fast_symlink(struct inode *inode)
53 {
54         int ea_blocks = EXT4_I(inode)->i_file_acl ?
55                 (inode->i_sb->s_blocksize >> 9) : 0;
56
57         return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
58 }
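/*
 * A worked example of the unit conversion above, assuming a 4096-byte block
 * size: i_blocks counts 512-byte sectors, so one filesystem block contributes
 * (s_blocksize >> 9) = 4096 >> 9 = 8 sectors.  An inode whose only on-disk
 * block is its xattr (i_file_acl) block therefore has i_blocks == 8 and is
 * still treated as a fast symlink, because the link target lives in i_data.
 */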
59
60 /*
61  * The ext4 forget function must perform a revoke if we are freeing data
62  * which has been journaled.  Metadata (eg. indirect blocks) must be
63  * revoked in all cases.
64  *
65  * "bh" may be NULL: a metadata block may have been freed from memory
66  * but there may still be a record of it in the journal, and that record
67  * still needs to be revoked.
68  */
69 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
70                         struct buffer_head *bh, ext4_fsblk_t blocknr)
71 {
72         int err;
73
74         might_sleep();
75
76         BUFFER_TRACE(bh, "enter");
77
78         jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
79                   "data mode %lx\n",
80                   bh, is_metadata, inode->i_mode,
81                   test_opt(inode->i_sb, DATA_FLAGS));
82
83         /* Never use the revoke function if we are doing full data
84          * journaling: there is no need to, and a V1 superblock won't
85          * support it.  Otherwise, only skip the revoke on un-journaled
86          * data blocks. */
87
88         if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
89             (!is_metadata && !ext4_should_journal_data(inode))) {
90                 if (bh) {
91                         BUFFER_TRACE(bh, "call jbd2_journal_forget");
92                         return ext4_journal_forget(handle, bh);
93                 }
94                 return 0;
95         }
96
97         /*
98          * data!=journal && (is_metadata || should_journal_data(inode))
99          */
100         BUFFER_TRACE(bh, "call ext4_journal_revoke");
101         err = ext4_journal_revoke(handle, blocknr, bh);
102         if (err)
103                 ext4_abort(inode->i_sb, __func__,
104                            "error %d when attempting revoke", err);
105         BUFFER_TRACE(bh, "exit");
106         return err;
107 }
108
109 /*
110  * Work out how many blocks we need to proceed with the next chunk of a
111  * truncate transaction.
112  */
113 static unsigned long blocks_for_truncate(struct inode *inode)
114 {
115         ext4_lblk_t needed;
116
117         needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);
118
119         /* Give ourselves just enough room to cope with inodes in which
120          * i_blocks is corrupt: we've seen disk corruptions in the past
121          * which resulted in random data in an inode which looked enough
122          * like a regular file for ext4 to try to delete it.  Things
123          * will go a bit crazy if that happens, but at least we should
124          * try not to panic the whole kernel. */
125         if (needed < 2)
126                 needed = 2;
127
128         /* But we need to bound the transaction so we don't overflow the
129          * journal. */
130         if (needed > EXT4_MAX_TRANS_DATA)
131                 needed = EXT4_MAX_TRANS_DATA;
132
133         return EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
134 }
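/*
 * A quick worked example of the estimate above, assuming a 4096-byte block
 * size (s_blocksize_bits == 12): i_blocks is in 512-byte sectors, so the
 * shift by (12 - 9) divides by 8.  An inode with i_blocks == 80 sectors
 * needs roughly 10 block-sized chunks; that value is clamped to the range
 * [2, EXT4_MAX_TRANS_DATA] and the handle is started with
 * EXT4_DATA_TRANS_BLOCKS(sb) + 10 credits.
 */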
135
136 /*
137  * Truncate transactions can be complex and absolutely huge.  So we need to
138  * be able to restart the transaction at a convenient checkpoint to make
139  * sure we don't overflow the journal.
140  *
141  * start_transaction gets us a new handle for a truncate transaction,
142  * and extend_transaction tries to extend the existing one a bit.  If
143  * extend fails, we need to propagate the failure up and restart the
144  * transaction in the top-level truncate loop. --sct
145  */
146 static handle_t *start_transaction(struct inode *inode)
147 {
148         handle_t *result;
149
150         result = ext4_journal_start(inode, blocks_for_truncate(inode));
151         if (!IS_ERR(result))
152                 return result;
153
154         ext4_std_error(inode->i_sb, PTR_ERR(result));
155         return result;
156 }
157
158 /*
159  * Try to extend this transaction for the purposes of truncation.
160  *
161  * Returns 0 if we managed to create more room.  If we can't create more
162  * room, the transaction must be restarted and we return 1.
163  */
164 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
165 {
166         if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
167                 return 0;
168         if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
169                 return 0;
170         return 1;
171 }
172
173 /*
174  * Restart the transaction associated with *handle.  This does a commit,
175  * so before we call here everything must be consistently dirtied against
176  * this transaction.
177  */
178 static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
179 {
180         jbd_debug(2, "restarting handle %p\n", handle);
181         return ext4_journal_restart(handle, blocks_for_truncate(inode));
182 }
183
184 /*
185  * Called at the last iput() if i_nlink is zero.
186  */
187 void ext4_delete_inode (struct inode * inode)
188 {
189         handle_t *handle;
190
191         if (ext4_should_order_data(inode))
192                 ext4_begin_ordered_truncate(inode, 0);
193         truncate_inode_pages(&inode->i_data, 0);
194
195         if (is_bad_inode(inode))
196                 goto no_delete;
197
198         handle = start_transaction(inode);
199         if (IS_ERR(handle)) {
200                 /*
201                  * If we're going to skip the normal cleanup, we still need to
202                  * make sure that the in-core orphan linked list is properly
203                  * cleaned up.
204                  */
205                 ext4_orphan_del(NULL, inode);
206                 goto no_delete;
207         }
208
209         if (IS_SYNC(inode))
210                 handle->h_sync = 1;
211         inode->i_size = 0;
212         if (inode->i_blocks)
213                 ext4_truncate(inode);
214         /*
215          * Kill off the orphan record which ext4_truncate created.
216          * AKPM: I think this can be inside the above `if'.
217          * Note that ext4_orphan_del() has to be able to cope with the
218          * deletion of a non-existent orphan - this is because we don't
219          * know if ext4_truncate() actually created an orphan record.
220          * (Well, we could do this if we need to, but heck - it works)
221          */
222         ext4_orphan_del(handle, inode);
223         EXT4_I(inode)->i_dtime  = get_seconds();
224
225         /*
226          * One subtle ordering requirement: if anything has gone wrong
227          * (transaction abort, IO errors, whatever), then we can still
228          * do these next steps (the fs will already have been marked as
229          * having errors), but we can't free the inode if the mark_dirty
230          * fails.
231          */
232         if (ext4_mark_inode_dirty(handle, inode))
233                 /* If that failed, just do the required in-core inode clear. */
234                 clear_inode(inode);
235         else
236                 ext4_free_inode(handle, inode);
237         ext4_journal_stop(handle);
238         return;
239 no_delete:
240         clear_inode(inode);     /* We must guarantee clearing of inode... */
241 }
242
243 typedef struct {
244         __le32  *p;
245         __le32  key;
246         struct buffer_head *bh;
247 } Indirect;
248
249 static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
250 {
251         p->key = *(p->p = v);
252         p->bh = bh;
253 }
254
255 /**
256  *      ext4_block_to_path - parse the block number into array of offsets
257  *      @inode: inode in question (we are only interested in its superblock)
258  *      @i_block: block number to be parsed
259  *      @offsets: array to store the offsets in
260  *      @boundary: set this non-zero if the referred-to block is likely to be
261  *             followed (on disk) by an indirect block.
262  *
263  *      To store the locations of a file's data, ext4 uses a data structure common
264  *      for UNIX filesystems - tree of pointers anchored in the inode, with
265  *      data blocks at leaves and indirect blocks in intermediate nodes.
266  *      This function translates the block number into path in that tree -
267  *      return value is the path length and @offsets[n] is the offset of
268  *      pointer to the (n+1)th node in the nth one. If @i_block is out of range
269  *      (negative or too large), a warning is printed and zero is returned.
270  *
271  *      Note: function doesn't find node addresses, so no IO is needed. All
272  *      we need to know is the capacity of indirect blocks (taken from the
273  *      inode->i_sb).
274  */
275
276 /*
277  * Portability note: the last comparison (check that we fit into triple
278  * indirect block) is spelled differently, because otherwise on an
279  * architecture with 32-bit longs and 8Kb pages we might get into trouble
280  * if our filesystem had 8Kb blocks. We might use long long, but that would
281  * kill us on x86. Oh, well, at least the sign propagation does not matter -
282  * i_block would have to be negative in the very beginning, so we would not
283  * get there at all.
284  */
285
286 static int ext4_block_to_path(struct inode *inode,
287                         ext4_lblk_t i_block,
288                         ext4_lblk_t offsets[4], int *boundary)
289 {
290         int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
291         int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
292         const long direct_blocks = EXT4_NDIR_BLOCKS,
293                 indirect_blocks = ptrs,
294                 double_blocks = (1 << (ptrs_bits * 2));
295         int n = 0;
296         int final = 0;
297
298         if (i_block < 0) {
299                 ext4_warning (inode->i_sb, "ext4_block_to_path", "block < 0");
300         } else if (i_block < direct_blocks) {
301                 offsets[n++] = i_block;
302                 final = direct_blocks;
303         } else if ( (i_block -= direct_blocks) < indirect_blocks) {
304                 offsets[n++] = EXT4_IND_BLOCK;
305                 offsets[n++] = i_block;
306                 final = ptrs;
307         } else if ((i_block -= indirect_blocks) < double_blocks) {
308                 offsets[n++] = EXT4_DIND_BLOCK;
309                 offsets[n++] = i_block >> ptrs_bits;
310                 offsets[n++] = i_block & (ptrs - 1);
311                 final = ptrs;
312         } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
313                 offsets[n++] = EXT4_TIND_BLOCK;
314                 offsets[n++] = i_block >> (ptrs_bits * 2);
315                 offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
316                 offsets[n++] = i_block & (ptrs - 1);
317                 final = ptrs;
318         } else {
319                 ext4_warning(inode->i_sb, "ext4_block_to_path",
320                                 "block %lu > max",
321                                 i_block + direct_blocks +
322                                 indirect_blocks + double_blocks);
323         }
324         if (boundary)
325                 *boundary = final - 1 - (i_block & (ptrs - 1));
326         return n;
327 }
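/*
 * A worked example of the translation above, assuming a 4096-byte block size
 * (so ptrs == 1024, ptrs_bits == 10, EXT4_NDIR_BLOCKS == 12): for
 * i_block == 5000 we subtract the 12 direct blocks (leaving 4988), then the
 * 1024 singly-indirect slots (leaving 3964), which lands in the
 * doubly-indirect range.  The result is depth 3 with offsets
 * {EXT4_DIND_BLOCK, 3964 >> 10 = 3, 3964 & 1023 = 892}, and
 * *boundary = 1024 - 1 - 892 = 131 data blocks left before the next
 * indirect block.
 */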
328
329 /**
330  *      ext4_get_branch - read the chain of indirect blocks leading to data
331  *      @inode: inode in question
332  *      @depth: depth of the chain (1 - direct pointer, etc.)
333  *      @offsets: offsets of pointers in inode/indirect blocks
334  *      @chain: place to store the result
335  *      @err: here we store the error value
336  *
337  *      Function fills the array of triples <key, p, bh> and returns %NULL
338  *      if everything went OK or the pointer to the last filled triple
339  *      (incomplete one) otherwise. Upon the return chain[i].key contains
340  *      the number of (i+1)-th block in the chain (as it is stored in memory,
341  *      i.e. little-endian 32-bit), chain[i].p contains the address of that
342  *      number (it points into struct inode for i==0 and into the bh->b_data
343  *      for i>0) and chain[i].bh points to the buffer_head of i-th indirect
344  *      block for i>0 and NULL for i==0. In other words, it holds the block
345  *      numbers of the chain, addresses they were taken from (and where we can
346  *      verify that chain did not change) and buffer_heads hosting these
347  *      numbers.
348  *
349  *      Function stops when it stumbles upon zero pointer (absent block)
350  *              (pointer to last triple returned, *@err == 0)
351  *      or when it gets an IO error reading an indirect block
352  *              (ditto, *@err == -EIO)
353  *      or when it reads all @depth-1 indirect blocks successfully and finds
354  *      the whole chain, all the way to the data (returns %NULL, *err == 0).
355  *
356  *      Need to be called with
357  *      down_read(&EXT4_I(inode)->i_data_sem)
358  */
359 static Indirect *ext4_get_branch(struct inode *inode, int depth,
360                                  ext4_lblk_t  *offsets,
361                                  Indirect chain[4], int *err)
362 {
363         struct super_block *sb = inode->i_sb;
364         Indirect *p = chain;
365         struct buffer_head *bh;
366
367         *err = 0;
368         /* i_data is not going away, no lock needed */
369         add_chain (chain, NULL, EXT4_I(inode)->i_data + *offsets);
370         if (!p->key)
371                 goto no_block;
372         while (--depth) {
373                 bh = sb_bread(sb, le32_to_cpu(p->key));
374                 if (!bh)
375                         goto failure;
376                 add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
377                 /* Reader: end */
378                 if (!p->key)
379                         goto no_block;
380         }
381         return NULL;
382
383 failure:
384         *err = -EIO;
385 no_block:
386         return p;
387 }
388
389 /**
390  *      ext4_find_near - find a place for allocation with sufficient locality
391  *      @inode: owner
392  *      @ind: descriptor of indirect block.
393  *
394  *      This function returns the preferred place for block allocation.
395  *      It is used when the heuristic for sequential allocation fails.
396  *      Rules are:
397  *        + if there is a block to the left of our position - allocate near it.
398  *        + if pointer will live in indirect block - allocate near that block.
399  *        + if pointer will live in inode - allocate in the same
400  *          cylinder group.
401  *
402  * In the latter case we colour the starting block by the caller's PID to
403  * prevent it from clashing with concurrent allocations for a different inode
404  * in the same block group.   The PID is used here so that functionally related
405  * files will be close-by on-disk.
406  *
407  *      Caller must make sure that @ind is valid and will stay that way.
408  */
409 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
410 {
411         struct ext4_inode_info *ei = EXT4_I(inode);
412         __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
413         __le32 *p;
414         ext4_fsblk_t bg_start;
415         ext4_fsblk_t last_block;
416         ext4_grpblk_t colour;
417
418         /* Try to find previous block */
419         for (p = ind->p - 1; p >= start; p--) {
420                 if (*p)
421                         return le32_to_cpu(*p);
422         }
423
424         /* No such thing, so let's try location of indirect block */
425         if (ind->bh)
426                 return ind->bh->b_blocknr;
427
428         /*
429          * It is going to be referred to from the inode itself? OK, just put it
430          * into the same cylinder group then.
431          */
432         bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
433         last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
434
435         if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
436                 colour = (current->pid % 16) *
437                         (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
438         else
439                 colour = (current->pid % 16) * ((last_block - bg_start) / 16);
440         return bg_start + colour;
441 }
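/*
 * A small numeric sketch of the colouring above (the PID and group size are
 * purely illustrative): with 32768 blocks per group and current->pid == 1234,
 * colour = (1234 % 16) * (32768 / 16) = 2 * 2048 = 4096, so the goal becomes
 * bg_start + 4096, keeping concurrent allocators in 16 separate regions of
 * the group.
 */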
442
443 /**
444  *      ext4_find_goal - find a preferred place for allocation.
445  *      @inode: owner
446  *      @block:  block we want
447  *      @partial: pointer to the last triple within a chain
448  *
449  *      Normally this function finds the preferred place for block allocation
450  *      and returns it.
451  */
452 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
453                 Indirect *partial)
454 {
455         struct ext4_block_alloc_info *block_i;
456
457         block_i =  EXT4_I(inode)->i_block_alloc_info;
458
459         /*
460          * try the heuristic for sequential allocation,
461          * failing that at least try to get decent locality.
462          */
463         if (block_i && (block == block_i->last_alloc_logical_block + 1)
464                 && (block_i->last_alloc_physical_block != 0)) {
465                 return block_i->last_alloc_physical_block + 1;
466         }
467
468         return ext4_find_near(inode, partial);
469 }
470
471 /**
472  *      ext4_blks_to_allocate: Look up the block map and count the number
473  *      of direct blocks that need to be allocated for the given branch.
474  *
475  *      @branch: chain of indirect blocks
476  *      @k: number of blocks needed for indirect blocks
477  *      @blks: number of data blocks to be mapped.
478  *      @blocks_to_boundary:  the offset in the indirect block
479  *
480  *      return the total number of blocks to be allocated, including the
481  *      direct and indirect blocks.
482  */
483 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
484                 int blocks_to_boundary)
485 {
486         unsigned long count = 0;
487
488         /*
489          * Simple case: the [t,d]indirect block(s) have not been allocated yet,
490          * so clearly the blocks on that path have not been allocated either
491          */
492         if (k > 0) {
493                 /* right now we don't handle cross boundary allocation */
494                 if (blks < blocks_to_boundary + 1)
495                         count += blks;
496                 else
497                         count += blocks_to_boundary + 1;
498                 return count;
499         }
500
501         count++;
502         while (count < blks && count <= blocks_to_boundary &&
503                 le32_to_cpu(*(branch[0].p + count)) == 0) {
504                 count++;
505         }
506         return count;
507 }
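/*
 * Two illustrative cases for the counting above (the numbers are assumptions,
 * not taken from real metadata): if k > 0 (some [td]indirect blocks are
 * missing) with blks == 8 and blocks_to_boundary == 5, we request
 * min(8, 5 + 1) = 6 direct blocks.  If k == 0, we start from 1 and extend the
 * request while the following entries of branch[0].p are still zero, again
 * stopping at blks or at the indirect-block boundary.
 */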
508
509 /**
510  *      ext4_alloc_blocks: allocate multiple blocks needed for a branch
511  *      @indirect_blks: the number of blocks we need to allocate for indirect
512  *                      blocks
513  *
514  *      @new_blocks: on return it will store the new block numbers for
515  *      the indirect blocks(if needed) and the first direct block,
516  *      @blks:  on return it will store the total number of allocated
517  *              direct blocks
518  */
519 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
520                                 ext4_lblk_t iblock, ext4_fsblk_t goal,
521                                 int indirect_blks, int blks,
522                                 ext4_fsblk_t new_blocks[4], int *err)
523 {
524         int target, i;
525         unsigned long count = 0, blk_allocated = 0;
526         int index = 0;
527         ext4_fsblk_t current_block = 0;
528         int ret = 0;
529
530         /*
531          * Here we try to allocate the requested multiple blocks at once,
532          * on a best-effort basis.
533          * To build a branch, we should allocate blocks for
534          * the indirect blocks (if not allocated yet), and at least
535          * the first direct block of this branch.  That's the
536          * minimum number of blocks we need to allocate (required)
537          */
538         /* first we try to allocate the indirect blocks */
539         target = indirect_blks;
540         while (target > 0) {
541                 count = target;
542                 /* allocating blocks for indirect blocks and direct blocks */
543                 current_block = ext4_new_meta_blocks(handle, inode,
544                                                         goal, &count, err);
545                 if (*err)
546                         goto failed_out;
547
548                 target -= count;
549                 /* allocate blocks for indirect blocks */
550                 while (index < indirect_blks && count) {
551                         new_blocks[index++] = current_block++;
552                         count--;
553                 }
554                 if (count > 0) {
555                         /*
556                          * save the new block number
557                          * for the first direct block
558                          */
559                         new_blocks[index] = current_block;
560                         printk(KERN_INFO "%s returned more blocks than "
561                                                 "requested\n", __func__);
562                         WARN_ON(1);
563                         break;
564                 }
565         }
566
567         target = blks - count ;
568         blk_allocated = count;
569         if (!target)
570                 goto allocated;
571         /* Now allocate data blocks */
572         count = target;
573         /* allocating blocks for data blocks */
574         current_block = ext4_new_blocks(handle, inode, iblock,
575                                                 goal, &count, err);
576         if (*err && (target == blks)) {
577                 /*
578                  * if the allocation failed and we didn't allocate
579                  * any blocks before
580                  */
581                 goto failed_out;
582         }
583         if (!*err) {
584                 if (target == blks) {
585                 /*
586                  * save the new block number
587                  * for the first direct block
588                  */
589                         new_blocks[index] = current_block;
590                 }
591                 blk_allocated += count;
592         }
593 allocated:
594         /* total number of blocks allocated for direct blocks */
595         ret = blk_allocated;
596         *err = 0;
597         return ret;
598 failed_out:
599         for (i = 0; i <index; i++)
600                 ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
601         return ret;
602 }
603
604 /**
605  *      ext4_alloc_branch - allocate and set up a chain of blocks.
606  *      @inode: owner
607  *      @indirect_blks: number of allocated indirect blocks
608  *      @blks: number of allocated direct blocks
609  *      @offsets: offsets (in the blocks) to store the pointers to next.
610  *      @branch: place to store the chain in.
611  *
612  *      This function allocates blocks, zeroes out all but the last one,
613  *      links them into chain and (if we are synchronous) writes them to disk.
614  *      In other words, it prepares a branch that can be spliced onto the
615  *      inode. It stores the information about that chain in the branch[], in
616  *      the same format as ext4_get_branch() would do. We are calling it after
617  *      we had read the existing part of chain and partial points to the last
618  *      triple of that (one with zero ->key). Upon the exit we have the same
619  *      picture as after the successful ext4_get_block(), except that in one
620  *      place chain is disconnected - *branch->p is still zero (we did not
621  *      set the last link), but branch->key contains the number that should
622  *      be placed into *branch->p to fill that gap.
623  *
624  *      If allocation fails we free all blocks we've allocated (and forget
625  *      their buffer_heads) and return the error value from the failed
626  *      ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
627  *      as described above and return 0.
628  */
629 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
630                                 ext4_lblk_t iblock, int indirect_blks,
631                                 int *blks, ext4_fsblk_t goal,
632                                 ext4_lblk_t *offsets, Indirect *branch)
633 {
634         int blocksize = inode->i_sb->s_blocksize;
635         int i, n = 0;
636         int err = 0;
637         struct buffer_head *bh;
638         int num;
639         ext4_fsblk_t new_blocks[4];
640         ext4_fsblk_t current_block;
641
642         num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
643                                 *blks, new_blocks, &err);
644         if (err)
645                 return err;
646
647         branch[0].key = cpu_to_le32(new_blocks[0]);
648         /*
649          * metadata blocks and data blocks are allocated.
650          */
651         for (n = 1; n <= indirect_blks;  n++) {
652                 /*
653                  * Get buffer_head for parent block, zero it out
654                  * and set the pointer to new one, then send
655                  * parent to disk.
656                  */
657                 bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
658                 branch[n].bh = bh;
659                 lock_buffer(bh);
660                 BUFFER_TRACE(bh, "call get_create_access");
661                 err = ext4_journal_get_create_access(handle, bh);
662                 if (err) {
663                         unlock_buffer(bh);
664                         brelse(bh);
665                         goto failed;
666                 }
667
668                 memset(bh->b_data, 0, blocksize);
669                 branch[n].p = (__le32 *) bh->b_data + offsets[n];
670                 branch[n].key = cpu_to_le32(new_blocks[n]);
671                 *branch[n].p = branch[n].key;
672                 if ( n == indirect_blks) {
673                         current_block = new_blocks[n];
674                         /*
675                          * End of chain, update the last new metablock of
676                          * the chain to point to the newly allocated
677                          * data block numbers
678                          */
679                         for (i=1; i < num; i++)
680                                 *(branch[n].p + i) = cpu_to_le32(++current_block);
681                 }
682                 BUFFER_TRACE(bh, "marking uptodate");
683                 set_buffer_uptodate(bh);
684                 unlock_buffer(bh);
685
686                 BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
687                 err = ext4_journal_dirty_metadata(handle, bh);
688                 if (err)
689                         goto failed;
690         }
691         *blks = num;
692         return err;
693 failed:
694         /* Allocation failed, free what we already allocated */
695         for (i = 1; i <= n ; i++) {
696                 BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
697                 ext4_journal_forget(handle, branch[i].bh);
698         }
699         for (i = 0; i <indirect_blks; i++)
700                 ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
701
702         ext4_free_blocks(handle, inode, new_blocks[i], num, 0);
703
704         return err;
705 }
706
707 /**
708  * ext4_splice_branch - splice the allocated branch onto inode.
709  * @inode: owner
710  * @block: (logical) number of block we are adding
711  * @chain: chain of indirect blocks (with a missing link - see
712  *      ext4_alloc_branch)
713  * @where: location of missing link
714  * @num:   number of indirect blocks we are adding
715  * @blks:  number of direct blocks we are adding
716  *
717  * This function fills the missing link and does all housekeeping needed in
718  * inode (->i_blocks, etc.). In case of success we end up with the full
719  * chain to new block and return 0.
720  */
721 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
722                         ext4_lblk_t block, Indirect *where, int num, int blks)
723 {
724         int i;
725         int err = 0;
726         struct ext4_block_alloc_info *block_i;
727         ext4_fsblk_t current_block;
728
729         block_i = EXT4_I(inode)->i_block_alloc_info;
730         /*
731          * If we're splicing into a [td]indirect block (as opposed to the
732          * inode) then we need to get write access to the [td]indirect block
733          * before the splice.
734          */
735         if (where->bh) {
736                 BUFFER_TRACE(where->bh, "get_write_access");
737                 err = ext4_journal_get_write_access(handle, where->bh);
738                 if (err)
739                         goto err_out;
740         }
741         /* That's it */
742
743         *where->p = where->key;
744
745         /*
746          * Update the host buffer_head or inode to point to the just-allocated
747          * direct blocks
748          */
749         if (num == 0 && blks > 1) {
750                 current_block = le32_to_cpu(where->key) + 1;
751                 for (i = 1; i < blks; i++)
752                         *(where->p + i ) = cpu_to_le32(current_block++);
753         }
754
755         /*
756          * update the most recently allocated logical & physical block
757          * in i_block_alloc_info, to help find the proper goal block for the next
758          * allocation
759          */
760         if (block_i) {
761                 block_i->last_alloc_logical_block = block + blks - 1;
762                 block_i->last_alloc_physical_block =
763                                 le32_to_cpu(where[num].key) + blks - 1;
764         }
765
766         /* We are done with atomic stuff, now do the rest of housekeeping */
767
768         inode->i_ctime = ext4_current_time(inode);
769         ext4_mark_inode_dirty(handle, inode);
770
771         /* had we spliced it onto indirect block? */
772         if (where->bh) {
773                 /*
774                  * If we spliced it onto an indirect block, we haven't
775                  * altered the inode.  Note however that if it is being spliced
776                  * onto an indirect block at the very end of the file (the
777                  * file is growing) then we *will* alter the inode to reflect
778                  * the new i_size.  But that is not done here - it is done in
779                  * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
780                  */
781                 jbd_debug(5, "splicing indirect only\n");
782                 BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
783                 err = ext4_journal_dirty_metadata(handle, where->bh);
784                 if (err)
785                         goto err_out;
786         } else {
787                 /*
788                  * OK, we spliced it into the inode itself on a direct block.
789                  * Inode was dirtied above.
790                  */
791                 jbd_debug(5, "splicing direct\n");
792         }
793         return err;
794
795 err_out:
796         for (i = 1; i <= num; i++) {
797                 BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
798                 ext4_journal_forget(handle, where[i].bh);
799                 ext4_free_blocks(handle, inode,
800                                         le32_to_cpu(where[i-1].key), 1, 0);
801         }
802         ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);
803
804         return err;
805 }
806
807 /*
808  * Allocation strategy is simple: if we have to allocate something, we will
809  * have to go the whole way to the leaf. So let's do it before attaching anything
810  * to the tree, set linkage between the newborn blocks, write them if sync is
811  * required, recheck the path, free and repeat if check fails, otherwise
812  * set the last missing link (that will protect us from any truncate-generated
813  * removals - all blocks on the path are immune now) and possibly force the
814  * write on the parent block.
815  * That has a nice additional property: no special recovery from the failed
816  * allocations is needed - we simply release blocks and do not touch anything
817  * reachable from inode.
818  *
819  * `handle' can be NULL if create == 0.
820  *
821  * return > 0, # of blocks mapped or allocated.
822  * return = 0, if plain lookup failed.
823  * return < 0, error case.
824  *
825  *
826  * Need to be called with
827  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
828  * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
829  */
830 int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
831                 ext4_lblk_t iblock, unsigned long maxblocks,
832                 struct buffer_head *bh_result,
833                 int create, int extend_disksize)
834 {
835         int err = -EIO;
836         ext4_lblk_t offsets[4];
837         Indirect chain[4];
838         Indirect *partial;
839         ext4_fsblk_t goal;
840         int indirect_blks;
841         int blocks_to_boundary = 0;
842         int depth;
843         struct ext4_inode_info *ei = EXT4_I(inode);
844         int count = 0;
845         ext4_fsblk_t first_block = 0;
846
847
848         J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
849         J_ASSERT(handle != NULL || create == 0);
850         depth = ext4_block_to_path(inode, iblock, offsets,
851                                         &blocks_to_boundary);
852
853         if (depth == 0)
854                 goto out;
855
856         partial = ext4_get_branch(inode, depth, offsets, chain, &err);
857
858         /* Simplest case - block found, no allocation needed */
859         if (!partial) {
860                 first_block = le32_to_cpu(chain[depth - 1].key);
861                 clear_buffer_new(bh_result);
862                 count++;
863                 /*map more blocks*/
864                 while (count < maxblocks && count <= blocks_to_boundary) {
865                         ext4_fsblk_t blk;
866
867                         blk = le32_to_cpu(*(chain[depth-1].p + count));
868
869                         if (blk == first_block + count)
870                                 count++;
871                         else
872                                 break;
873                 }
874                 goto got_it;
875         }
876
877         /* Next simple case - plain lookup or failed read of indirect block */
878         if (!create || err == -EIO)
879                 goto cleanup;
880
881         /*
882          * Okay, we need to do block allocation.  Lazily initialize the block
883          * allocation info here if necessary
884         */
885         if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
886                 ext4_init_block_alloc_info(inode);
887
888         goal = ext4_find_goal(inode, iblock, partial);
889
890         /* the number of blocks we need to allocate for [d,t]indirect blocks */
891         indirect_blks = (chain + depth) - partial - 1;
892
893         /*
894          * Next look up the indirect map to count the total number of
895          * direct blocks to allocate for this branch.
896          */
897         count = ext4_blks_to_allocate(partial, indirect_blks,
898                                         maxblocks, blocks_to_boundary);
899         /*
900          * Block out ext4_truncate while we alter the tree
901          */
902         err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
903                                         &count, goal,
904                                         offsets + (partial - chain), partial);
905
906         /*
907          * The ext4_splice_branch call will free and forget any buffers
908          * on the new chain if there is a failure, but that risks using
909          * up transaction credits, especially for bitmaps where the
910          * credits cannot be returned.  Can we handle this somehow?  We
911          * may need to return -EAGAIN upwards in the worst case.  --sct
912          */
913         if (!err)
914                 err = ext4_splice_branch(handle, inode, iblock,
915                                         partial, indirect_blks, count);
916         /*
917          * i_disksize growing is protected by i_data_sem.  Don't forget to
918          * protect it if you're about to implement concurrent
919          * ext4_get_block() -bzzz
920         */
921         if (!err && extend_disksize && inode->i_size > ei->i_disksize)
922                 ei->i_disksize = inode->i_size;
923         if (err)
924                 goto cleanup;
925
926         set_buffer_new(bh_result);
927 got_it:
928         map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
929         if (count > blocks_to_boundary)
930                 set_buffer_boundary(bh_result);
931         err = count;
932         /* Clean up and exit */
933         partial = chain + depth - 1;    /* the whole chain */
934 cleanup:
935         while (partial > chain) {
936                 BUFFER_TRACE(partial->bh, "call brelse");
937                 brelse(partial->bh);
938                 partial--;
939         }
940         BUFFER_TRACE(bh_result, "returned");
941 out:
942         return err;
943 }
944
945 /* Maximum number of blocks we map for direct IO at once. */
946 #define DIO_MAX_BLOCKS 4096
947 /*
948  * Number of credits we need for writing DIO_MAX_BLOCKS:
949  * We need sb + group descriptor + bitmap + inode -> 4
950  * For B blocks with A block pointers per block we need:
951  * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
952  * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
953  */
954 #define DIO_CREDITS 25
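/*
 * Plugging the numbers from the comment above into the formula (B == 4096
 * blocks, A == 256 pointers per block for a 1KB block size):
 * 1 (triple ind.) + (4096/256/256 rounds down to 0, plus 2 = 2) (doubly ind.)
 * + (4096/256 = 16, plus 2 = 18) (indirect) + 4 (sb + group descriptor +
 * bitmap + inode) = 1 + 2 + 18 + 4 = 25 credits.
 */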
955
956
957 /*
958  *
959  *
960  * The ext4 get_block() wrapper function.
961  * It will do a lookup first, and return if the blocks are already mapped.
962  * Otherwise it takes the write lock of i_data_sem, allocates blocks,
963  * stores the allocated blocks in the result buffer head and marks it
964  * mapped.
965  *
966  * If the file is extent-based, it will call ext4_ext_get_blocks();
967  * otherwise it calls ext4_get_blocks_handle() to handle indirect-mapped
968  * files.
969  *
970  * On success, it returns the number of blocks mapped or allocated.
971  * If create == 0 and the blocks are pre-allocated and uninitialized,
972  * the result buffer head is unmapped. If create == 1, it will make sure
973  * the buffer head is mapped.
974  *
975  * It returns 0 if a plain lookup failed (blocks have not been allocated);
976  * in that case the buffer head is unmapped.
977  *
978  * It returns the error in case of allocation failure.
979  */
980 int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
981                         unsigned long max_blocks, struct buffer_head *bh,
982                         int create, int extend_disksize)
983 {
984         int retval;
985
986         clear_buffer_mapped(bh);
987
988         /*
989          * Try to see if we can get the block without requesting
990          * a new file system block.
991          */
992         down_read((&EXT4_I(inode)->i_data_sem));
993         if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
994                 retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
995                                 bh, 0, 0);
996         } else {
997                 retval = ext4_get_blocks_handle(handle,
998                                 inode, block, max_blocks, bh, 0, 0);
999         }
1000         up_read((&EXT4_I(inode)->i_data_sem));
1001
1002         /* If it is only a block(s) look up */
1003         if (!create)
1004                 return retval;
1005
1006         /*
1007          * Return if the blocks have already been allocated.
1008          *
1009          * Note that if blocks have been preallocated,
1010          * ext4_ext_get_blocks() returns with create = 0
1011          * and the buffer head unmapped.
1012          */
1013         if (retval > 0 && buffer_mapped(bh))
1014                 return retval;
1015
1016         /*
1017          * Allocating new blocks and/or writing to an uninitialized extent
1018          * will possibly result in updating i_data, so we take
1019          * the write lock of i_data_sem, and call get_blocks()
1020          * with create == 1 flag.
1021          */
1022         down_write((&EXT4_I(inode)->i_data_sem));
1023         /*
1024          * We need to re-check the extents flag here because migrate
1025          * could have changed the inode type in between
1026          */
1027         if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
1028                 retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
1029                                 bh, create, extend_disksize);
1030         } else {
1031                 retval = ext4_get_blocks_handle(handle, inode, block,
1032                                 max_blocks, bh, create, extend_disksize);
1033
1034                 if (retval > 0 && buffer_new(bh)) {
1035                         /*
1036                          * We allocated new blocks which will result in
1037                          * i_data's format changing.  Force the migrate
1038                          * to fail by clearing migrate flags
1039                          */
1040                         EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
1041                                                         ~EXT4_EXT_MIGRATE;
1042                 }
1043         }
1044         up_write((&EXT4_I(inode)->i_data_sem));
1045         return retval;
1046 }
1047
1048 static int ext4_get_block(struct inode *inode, sector_t iblock,
1049                         struct buffer_head *bh_result, int create)
1050 {
1051         handle_t *handle = ext4_journal_current_handle();
1052         int ret = 0, started = 0;
1053         unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
1054
1055         if (create && !handle) {
1056                 /* Direct IO write... */
1057                 if (max_blocks > DIO_MAX_BLOCKS)
1058                         max_blocks = DIO_MAX_BLOCKS;
1059                 handle = ext4_journal_start(inode, DIO_CREDITS +
1060                               2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb));
1061                 if (IS_ERR(handle)) {
1062                         ret = PTR_ERR(handle);
1063                         goto out;
1064                 }
1065                 started = 1;
1066         }
1067
1068         ret = ext4_get_blocks_wrap(handle, inode, iblock,
1069                                         max_blocks, bh_result, create, 0);
1070         if (ret > 0) {
1071                 bh_result->b_size = (ret << inode->i_blkbits);
1072                 ret = 0;
1073         }
1074         if (started)
1075                 ext4_journal_stop(handle);
1076 out:
1077         return ret;
1078 }
1079
1080 /*
1081  * `handle' can be NULL if create is zero
1082  */
1083 struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
1084                                 ext4_lblk_t block, int create, int *errp)
1085 {
1086         struct buffer_head dummy;
1087         int fatal = 0, err;
1088
1089         J_ASSERT(handle != NULL || create == 0);
1090
1091         dummy.b_state = 0;
1092         dummy.b_blocknr = -1000;
1093         buffer_trace_init(&dummy.b_history);
1094         err = ext4_get_blocks_wrap(handle, inode, block, 1,
1095                                         &dummy, create, 1);
1096         /*
1097          * ext4_get_blocks_handle() returns the number of blocks
1098          * mapped; 0 in the case of a hole.
1099          */
1100         if (err > 0) {
1101                 if (err > 1)
1102                         WARN_ON(1);
1103                 err = 0;
1104         }
1105         *errp = err;
1106         if (!err && buffer_mapped(&dummy)) {
1107                 struct buffer_head *bh;
1108                 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
1109                 if (!bh) {
1110                         *errp = -EIO;
1111                         goto err;
1112                 }
1113                 if (buffer_new(&dummy)) {
1114                         J_ASSERT(create != 0);
1115                         J_ASSERT(handle != NULL);
1116
1117                         /*
1118                          * Now that we do not always journal data, we should
1119                          * keep in mind whether this should always journal the
1120                          * new buffer as metadata.  For now, regular file
1121                          * writes use ext4_get_block instead, so it's not a
1122                          * problem.
1123                          */
1124                         lock_buffer(bh);
1125                         BUFFER_TRACE(bh, "call get_create_access");
1126                         fatal = ext4_journal_get_create_access(handle, bh);
1127                         if (!fatal && !buffer_uptodate(bh)) {
1128                                 memset(bh->b_data,0,inode->i_sb->s_blocksize);
1129                                 set_buffer_uptodate(bh);
1130                         }
1131                         unlock_buffer(bh);
1132                         BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
1133                         err = ext4_journal_dirty_metadata(handle, bh);
1134                         if (!fatal)
1135                                 fatal = err;
1136                 } else {
1137                         BUFFER_TRACE(bh, "not a new buffer");
1138                 }
1139                 if (fatal) {
1140                         *errp = fatal;
1141                         brelse(bh);
1142                         bh = NULL;
1143                 }
1144                 return bh;
1145         }
1146 err:
1147         return NULL;
1148 }
1149
1150 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
1151                                ext4_lblk_t block, int create, int *err)
1152 {
1153         struct buffer_head * bh;
1154
1155         bh = ext4_getblk(handle, inode, block, create, err);
1156         if (!bh)
1157                 return bh;
1158         if (buffer_uptodate(bh))
1159                 return bh;
1160         ll_rw_block(READ_META, 1, &bh);
1161         wait_on_buffer(bh);
1162         if (buffer_uptodate(bh))
1163                 return bh;
1164         put_bh(bh);
1165         *err = -EIO;
1166         return NULL;
1167 }
1168
1169 static int walk_page_buffers(   handle_t *handle,
1170                                 struct buffer_head *head,
1171                                 unsigned from,
1172                                 unsigned to,
1173                                 int *partial,
1174                                 int (*fn)(      handle_t *handle,
1175                                                 struct buffer_head *bh))
1176 {
1177         struct buffer_head *bh;
1178         unsigned block_start, block_end;
1179         unsigned blocksize = head->b_size;
1180         int err, ret = 0;
1181         struct buffer_head *next;
1182
1183         for (   bh = head, block_start = 0;
1184                 ret == 0 && (bh != head || !block_start);
1185                 block_start = block_end, bh = next)
1186         {
1187                 next = bh->b_this_page;
1188                 block_end = block_start + blocksize;
1189                 if (block_end <= from || block_start >= to) {
1190                         if (partial && !buffer_uptodate(bh))
1191                                 *partial = 1;
1192                         continue;
1193                 }
1194                 err = (*fn)(handle, bh);
1195                 if (!ret)
1196                         ret = err;
1197         }
1198         return ret;
1199 }
1200
1201 /*
1202  * To preserve ordering, it is essential that the hole instantiation and
1203  * the data write be encapsulated in a single transaction.  We cannot
1204  * close off a transaction and start a new one between the ext4_get_block()
1205  * and the commit_write().  So doing the jbd2_journal_start at the start of
1206  * prepare_write() is the right place.
1207  *
1208  * Also, this function can nest inside ext4_writepage() ->
1209  * block_write_full_page(). In that case, we *know* that ext4_writepage()
1210  * has generated enough buffer credits to do the whole page.  So we won't
1211  * block on the journal in that case, which is good, because the caller may
1212  * be PF_MEMALLOC.
1213  *
1214  * By accident, ext4 can be reentered when a transaction is open via
1215  * quota file writes.  If we were to commit the transaction while thus
1216  * reentered, there can be a deadlock - we would be holding a quota
1217  * lock, and the commit would never complete if another thread had a
1218  * transaction open and was blocking on the quota lock - a ranking
1219  * violation.
1220  *
1221  * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
1222  * will _not_ run commit under these circumstances because handle->h_ref
1223  * is elevated.  We'll still have enough credits for the tiny quotafile
1224  * write.
1225  */
1226 static int do_journal_get_write_access(handle_t *handle,
1227                                         struct buffer_head *bh)
1228 {
1229         if (!buffer_mapped(bh) || buffer_freed(bh))
1230                 return 0;
1231         return ext4_journal_get_write_access(handle, bh);
1232 }
1233
1234 static int ext4_write_begin(struct file *file, struct address_space *mapping,
1235                                 loff_t pos, unsigned len, unsigned flags,
1236                                 struct page **pagep, void **fsdata)
1237 {
1238         struct inode *inode = mapping->host;
1239         int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
1240         handle_t *handle;
1241         int retries = 0;
1242         struct page *page;
1243         pgoff_t index;
1244         unsigned from, to;
1245
1246         index = pos >> PAGE_CACHE_SHIFT;
1247         from = pos & (PAGE_CACHE_SIZE - 1);
1248         to = from + len;
1249
1250 retry:
1251         handle = ext4_journal_start(inode, needed_blocks);
1252         if (IS_ERR(handle)) {
1253                 ret = PTR_ERR(handle);
1254                 goto out;
1255         }
1256
1257         page = __grab_cache_page(mapping, index);
1258         if (!page) {
1259                 ext4_journal_stop(handle);
1260                 ret = -ENOMEM;
1261                 goto out;
1262         }
1263         *pagep = page;
1264
1265         ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
1266                                                         ext4_get_block);
1267
1268         if (!ret && ext4_should_journal_data(inode)) {
1269                 ret = walk_page_buffers(handle, page_buffers(page),
1270                                 from, to, NULL, do_journal_get_write_access);
1271         }
1272
1273         if (ret) {
1274                 unlock_page(page);
1275                 ext4_journal_stop(handle);
1276                 page_cache_release(page);
1277         }
1278
1279         if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
1280                 goto retry;
1281 out:
1282         return ret;
1283 }
1284
1285 /* For write_end() in data=journal mode */
1286 static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1287 {
1288         if (!buffer_mapped(bh) || buffer_freed(bh))
1289                 return 0;
1290         set_buffer_uptodate(bh);
1291         return ext4_journal_dirty_metadata(handle, bh);
1292 }
1293
1294 /*
1295  * We need to pick up the new inode size which generic_commit_write gave us.
1296  * `file' can be NULL - eg, when called from page_symlink().
1297  *
1298  * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
1299  * buffers are managed internally.
1300  */
1301 static int ext4_ordered_write_end(struct file *file,
1302                                 struct address_space *mapping,
1303                                 loff_t pos, unsigned len, unsigned copied,
1304                                 struct page *page, void *fsdata)
1305 {
1306         handle_t *handle = ext4_journal_current_handle();
1307         struct inode *inode = mapping->host;
1308         unsigned from, to;
1309         int ret = 0, ret2;
1310
1311         from = pos & (PAGE_CACHE_SIZE - 1);
1312         to = from + len;
1313
1314         ret = ext4_jbd2_file_inode(handle, inode);
1315
1316         if (ret == 0) {
1317                 /*
1318                  * generic_write_end() will run mark_inode_dirty() if i_size
1319                  * changes.  So let's piggyback the i_disksize mark_inode_dirty
1320                  * into that.
1321                  */
1322                 loff_t new_i_size;
1323
1324                 new_i_size = pos + copied;
1325                 if (new_i_size > EXT4_I(inode)->i_disksize)
1326                         EXT4_I(inode)->i_disksize = new_i_size;
1327                 ret2 = generic_write_end(file, mapping, pos, len, copied,
1328                                                         page, fsdata);
1329                 copied = ret2;
1330                 if (ret2 < 0)
1331                         ret = ret2;
1332         }
1333         ret2 = ext4_journal_stop(handle);
1334         if (!ret)
1335                 ret = ret2;
1336
1337         return ret ? ret : copied;
1338 }
1339
1340 static int ext4_writeback_write_end(struct file *file,
1341                                 struct address_space *mapping,
1342                                 loff_t pos, unsigned len, unsigned copied,
1343                                 struct page *page, void *fsdata)
1344 {
1345         handle_t *handle = ext4_journal_current_handle();
1346         struct inode *inode = mapping->host;
1347         int ret = 0, ret2;
1348         loff_t new_i_size;
1349
1350         new_i_size = pos + copied;
1351         if (new_i_size > EXT4_I(inode)->i_disksize)
1352                 EXT4_I(inode)->i_disksize = new_i_size;
1353
1354         ret2 = generic_write_end(file, mapping, pos, len, copied,
1355                                                         page, fsdata);
1356         copied = ret2;
1357         if (ret2 < 0)
1358                 ret = ret2;
1359
1360         ret2 = ext4_journal_stop(handle);
1361         if (!ret)
1362                 ret = ret2;
1363
1364         return ret ? ret : copied;
1365 }
1366
1367 static int ext4_journalled_write_end(struct file *file,
1368                                 struct address_space *mapping,
1369                                 loff_t pos, unsigned len, unsigned copied,
1370                                 struct page *page, void *fsdata)
1371 {
1372         handle_t *handle = ext4_journal_current_handle();
1373         struct inode *inode = mapping->host;
1374         int ret = 0, ret2;
1375         int partial = 0;
1376         unsigned from, to;
1377
1378         from = pos & (PAGE_CACHE_SIZE - 1);
1379         to = from + len;
1380
1381         if (copied < len) {
1382                 if (!PageUptodate(page))
1383                         copied = 0;
1384                 page_zero_new_buffers(page, from+copied, to);
1385         }
1386
1387         ret = walk_page_buffers(handle, page_buffers(page), from,
1388                                 to, &partial, write_end_fn);
1389         if (!partial)
1390                 SetPageUptodate(page);
1391         if (pos+copied > inode->i_size)
1392                 i_size_write(inode, pos+copied);
1393         EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1394         if (inode->i_size > EXT4_I(inode)->i_disksize) {
1395                 EXT4_I(inode)->i_disksize = inode->i_size;
1396                 ret2 = ext4_mark_inode_dirty(handle, inode);
1397                 if (!ret)
1398                         ret = ret2;
1399         }
1400
1401         unlock_page(page);
1402         ret2 = ext4_journal_stop(handle);
1403         if (!ret)
1404                 ret = ret2;
1405         page_cache_release(page);
1406
1407         return ret ? ret : copied;
1408 }
1409
1410 /*
1411  * bmap() is special.  It gets used by applications such as lilo and by
1412  * the swapper to find the on-disk block of a specific piece of data.
1413  *
1414  * Naturally, this is dangerous if the block concerned is still in the
1415  * journal.  If somebody makes a swapfile on an ext4 data-journaling
1416  * filesystem and enables swap, then they may get a nasty shock when the
1417  * data getting swapped to that swapfile suddenly gets overwritten by
1418  * the original zeros written out previously to the journal and
1419  * awaiting writeback in the kernel's buffer cache.
1420  *
1421  * So, if we see any bmap calls here on a modified, data-journaled file,
1422  * take extra steps to flush any blocks which might be in the cache.
1423  */
1424 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
1425 {
1426         struct inode *inode = mapping->host;
1427         journal_t *journal;
1428         int err;
1429
1430         if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
1431                 /*
1432                  * This is a REALLY heavyweight approach, but the use of
1433                  * bmap on dirty files is expected to be extremely rare:
1434                  * only if we run lilo or swapon on a freshly made file
1435                  * do we expect this to happen.
1436                  *
1437                  * (bmap requires CAP_SYS_RAWIO so this does not
1438                  * represent an unprivileged user DOS attack --- we'd be
1439                  * in trouble if mortal users could trigger this path at
1440                  * will.)
1441                  *
1442                  * NB. EXT4_STATE_JDATA is not set on files other than
1443                  * regular files.  If somebody wants to bmap a directory
1444                  * or symlink and gets confused because the buffer
1445                  * hasn't yet been flushed to disk, they deserve
1446                  * everything they get.
1447                  */
1448
1449                 EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
1450                 journal = EXT4_JOURNAL(inode);
1451                 jbd2_journal_lock_updates(journal);
1452                 err = jbd2_journal_flush(journal);
1453                 jbd2_journal_unlock_updates(journal);
1454
1455                 if (err)
1456                         return 0;
1457         }
1458
1459         return generic_block_bmap(mapping, block, ext4_get_block);
1460 }
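/*
 * For illustration, a typical bmap consumer of the kind mentioned above is a
 * user-space tool that asks for the physical block behind a file offset via
 * the FIBMAP ioctl; that request is what eventually lands in ext4_bmap().
 * A minimal sketch (needs CAP_SYS_RAWIO, error handling trimmed, the file
 * name is only an example):
 *
 *	#include <stdio.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/boot/vmlinuz", O_RDONLY);
 *		int blk = 0;		// logical block in, physical block out
 *
 *		if (fd < 0 || ioctl(fd, FIBMAP, &blk) < 0)
 *			return 1;
 *		printf("logical block 0 -> physical block %d\n", blk);
 *		close(fd);
 *		return 0;
 *	}
 *
 * On a freshly written data=journal file, the journal flush performed above
 * ensures the block number reported here already contains the final data.
 */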
1461
1462 static int bget_one(handle_t *handle, struct buffer_head *bh)
1463 {
1464         get_bh(bh);
1465         return 0;
1466 }
1467
1468 static int bput_one(handle_t *handle, struct buffer_head *bh)
1469 {
1470         put_bh(bh);
1471         return 0;
1472 }
1473
1474 static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
1475 {
1476         return !buffer_mapped(bh) || buffer_delay(bh);
1477 }
1478
1479 /*
1480  * Note that we don't need to start a transaction unless we're journaling data
1481  * because we should have holes filled from ext4_page_mkwrite(). We don't even
1482  * need to file the inode on the transaction's list in ordered mode because if
1483  * we are writing back data added by write(), the inode is already there and if
1484  * we are writing back data modified via mmap(), no one guarantees in which
1485  * transaction the data will hit the disk. In case we are journaling data, we
1486  * cannot start a transaction directly because transaction start ranks above page
1487  * lock so we have to do some magic.
1488  *
1489  * In all journaling modes block_write_full_page() will start the I/O.
1490  *
1491  * Problem:
1492  *
1493  *      ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1494  *              ext4_writepage()
1495  *
1496  * Similar for:
1497  *
1498  *      ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
1499  *
1500  * The same applies to ext4_get_block().  We will deadlock on various things like
1501  * lock_journal and i_data_sem.
1502  *
1503  * Setting PF_MEMALLOC here doesn't work - too many internal memory
1504  * allocations fail.
1505  *
1506  * 16May01: If we're reentered then journal_current_handle() will be
1507  *          non-zero. We simply *return*.
1508  *
1509  * 1 July 2001: @@@ FIXME:
1510  *   In journalled data mode, a data buffer may be metadata against the
1511  *   current transaction.  But the same file is part of a shared mapping
1512  *   and someone does a writepage() on it.
1513  *
1514  *   We will move the buffer onto the async_data list, but *after* it has
1515  *   been dirtied. So there's a small window where we have dirty data on
1516  *   BJ_Metadata.
1517  *
1518  *   Note that this only applies to the last partial page in the file.  The
1519  *   bit which block_write_full_page() uses prepare/commit for.  (That's
1520  *   broken code anyway: it's wrong for msync()).
1521  *
1522  *   It's a rare case: affects the final partial page, for journalled data
1523  *   where the file is subject to both write() and writepage() in the same
1524  *   transaction.  To fix it we'll need a custom block_write_full_page().
1525  *   We'll probably need that anyway for journalling writepage() output.
1526  *
1527  * We don't honour synchronous mounts for writepage().  That would be
1528  * disastrous.  Any write() or metadata operation will sync the fs for
1529  * us.
1530  *
1531  */
1532 static int __ext4_normal_writepage(struct page *page,
1533                                 struct writeback_control *wbc)
1534 {
1535         struct inode *inode = page->mapping->host;
1536
1537         if (test_opt(inode->i_sb, NOBH))
1538                 return nobh_writepage(page, ext4_get_block, wbc);
1539         else
1540                 return block_write_full_page(page, ext4_get_block, wbc);
1541 }
1542
1543
1544 static int ext4_normal_writepage(struct page *page,
1545                                 struct writeback_control *wbc)
1546 {
1547         struct inode *inode = page->mapping->host;
1548         loff_t size = i_size_read(inode);
1549         loff_t len;
1550
1551         J_ASSERT(PageLocked(page));
1552         J_ASSERT(page_has_buffers(page));
1553         if (page->index == size >> PAGE_CACHE_SHIFT)
1554                 len = size & ~PAGE_CACHE_MASK;
1555         else
1556                 len = PAGE_CACHE_SIZE;
1557         BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
1558                                  ext4_bh_unmapped_or_delay));
1559
1560         if (!ext4_journal_current_handle())
1561                 return __ext4_normal_writepage(page, wbc);
1562
1563         redirty_page_for_writepage(wbc, page);
1564         unlock_page(page);
1565         return 0;
1566 }
1567
1568 static int __ext4_journalled_writepage(struct page *page,
1569                                 struct writeback_control *wbc)
1570 {
1571         struct address_space *mapping = page->mapping;
1572         struct inode *inode = mapping->host;
1573         struct buffer_head *page_bufs;
1574         handle_t *handle = NULL;
1575         int ret = 0;
1576         int err;
1577
1578         ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE, ext4_get_block);
1579         if (ret != 0)
1580                 goto out_unlock;
1581
1582         page_bufs = page_buffers(page);
1583         walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL,
1584                                                                 bget_one);
1585         /* As soon as we unlock the page, it can go away, but we have
1586          * references to buffers so we are safe */
1587         unlock_page(page);
1588
1589         handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1590         if (IS_ERR(handle)) {
1591                 ret = PTR_ERR(handle);
1592                 goto out;
1593         }
1594
1595         ret = walk_page_buffers(handle, page_bufs, 0,
1596                         PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
1597
1598         err = walk_page_buffers(handle, page_bufs, 0,
1599                                 PAGE_CACHE_SIZE, NULL, write_end_fn);
1600         if (ret == 0)
1601                 ret = err;
1602         err = ext4_journal_stop(handle);
1603         if (!ret)
1604                 ret = err;
1605
1606         walk_page_buffers(handle, page_bufs, 0,
1607                                 PAGE_CACHE_SIZE, NULL, bput_one);
1608         EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
1609         goto out;
1610
1611 out_unlock:
1612         unlock_page(page);
1613 out:
1614         return ret;
1615 }
1616
1617 static int ext4_journalled_writepage(struct page *page,
1618                                 struct writeback_control *wbc)
1619 {
1620         struct inode *inode = page->mapping->host;
1621         loff_t size = i_size_read(inode);
1622         loff_t len;
1623
1624         J_ASSERT(PageLocked(page));
1625         J_ASSERT(page_has_buffers(page));
1626         if (page->index == size >> PAGE_CACHE_SHIFT)
1627                 len = size & ~PAGE_CACHE_MASK;
1628         else
1629                 len = PAGE_CACHE_SIZE;
1630         BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
1631                                  ext4_bh_unmapped_or_delay));
1632
1633         if (ext4_journal_current_handle())
1634                 goto no_write;
1635
1636         if (PageChecked(page)) {
1637                 /*
1638                  * It's mmapped pagecache.  Add buffers and journal it.  There
1639                  * doesn't seem much point in redirtying the page here.
1640                  */
1641                 ClearPageChecked(page);
1642                 return __ext4_journalled_writepage(page, wbc);
1643         } else {
1644                 /*
1645                  * It may be a page full of checkpoint-mode buffers.  We don't
1646                  * really know unless we go poke around in the buffer_heads.
1647                  * But block_write_full_page will do the right thing.
1648                  */
1649                 return block_write_full_page(page, ext4_get_block, wbc);
1650         }
1651 no_write:
1652         redirty_page_for_writepage(wbc, page);
1653         unlock_page(page);
1654         return 0;
1655 }
1656
1657 static int ext4_readpage(struct file *file, struct page *page)
1658 {
1659         return mpage_readpage(page, ext4_get_block);
1660 }
1661
1662 static int
1663 ext4_readpages(struct file *file, struct address_space *mapping,
1664                 struct list_head *pages, unsigned nr_pages)
1665 {
1666         return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
1667 }
1668
1669 static void ext4_invalidatepage(struct page *page, unsigned long offset)
1670 {
1671         journal_t *journal = EXT4_JOURNAL(page->mapping->host);
1672
1673         /*
1674          * If it's a full truncate we just forget about the pending dirtying
1675          */
1676         if (offset == 0)
1677                 ClearPageChecked(page);
1678
1679         jbd2_journal_invalidatepage(journal, page, offset);
1680 }
1681
1682 static int ext4_releasepage(struct page *page, gfp_t wait)
1683 {
1684         journal_t *journal = EXT4_JOURNAL(page->mapping->host);
1685
1686         WARN_ON(PageChecked(page));
1687         if (!page_has_buffers(page))
1688                 return 0;
1689         return jbd2_journal_try_to_free_buffers(journal, page, wait);
1690 }
1691
1692 /*
1693  * If the O_DIRECT write will extend the file then add this inode to the
1694  * orphan list.  So recovery will truncate it back to the original size
1695  * if the machine crashes during the write.
1696  *
1697  * If the O_DIRECT write is instantiating holes inside i_size and the machine
1698  * crashes then stale disk data _may_ be exposed inside the file. But current
1699  * VFS code falls back to the buffered path in that case so we are safe.
1700  */
1701 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
1702                         const struct iovec *iov, loff_t offset,
1703                         unsigned long nr_segs)
1704 {
1705         struct file *file = iocb->ki_filp;
1706         struct inode *inode = file->f_mapping->host;
1707         struct ext4_inode_info *ei = EXT4_I(inode);
1708         handle_t *handle;
1709         ssize_t ret;
1710         int orphan = 0;
1711         size_t count = iov_length(iov, nr_segs);
1712
1713         if (rw == WRITE) {
1714                 loff_t final_size = offset + count;
1715
1716                 if (final_size > inode->i_size) {
1717                         /* Credits for sb + inode write */
1718                         handle = ext4_journal_start(inode, 2);
1719                         if (IS_ERR(handle)) {
1720                                 ret = PTR_ERR(handle);
1721                                 goto out;
1722                         }
1723                         ret = ext4_orphan_add(handle, inode);
1724                         if (ret) {
1725                                 ext4_journal_stop(handle);
1726                                 goto out;
1727                         }
1728                         orphan = 1;
1729                         ei->i_disksize = inode->i_size;
1730                         ext4_journal_stop(handle);
1731                 }
1732         }
1733
1734         ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
1735                                  offset, nr_segs,
1736                                  ext4_get_block, NULL);
1737
1738         if (orphan) {
1739                 int err;
1740
1741                 /* Credits for sb + inode write */
1742                 handle = ext4_journal_start(inode, 2);
1743                 if (IS_ERR(handle)) {
1744                         /* This is really bad luck. We've written the data
1745                          * but cannot extend i_size. Bail out and pretend
1746                          * the write failed... */
1747                         ret = PTR_ERR(handle);
1748                         goto out;
1749                 }
1750                 if (inode->i_nlink)
1751                         ext4_orphan_del(handle, inode);
1752                 if (ret > 0) {
1753                         loff_t end = offset + ret;
1754                         if (end > inode->i_size) {
1755                                 ei->i_disksize = end;
1756                                 i_size_write(inode, end);
1757                                 /*
1758                                  * We're going to return a positive `ret'
1759                                  * here due to non-zero-length I/O, so there's
1760                                  * no way of reporting error returns from
1761                                  * ext4_mark_inode_dirty() to userspace.  So
1762                                  * ignore it.
1763                                  */
1764                                 ext4_mark_inode_dirty(handle, inode);
1765                         }
1766                 }
1767                 err = ext4_journal_stop(handle);
1768                 if (ret == 0)
1769                         ret = err;
1770         }
1771 out:
1772         return ret;
1773 }
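/*
 * For illustration, the size-extending O_DIRECT write handled above looks
 * like this from user space (a minimal sketch; 4096 is just an example
 * alignment, error handling omitted):
 *
 *	#define _GNU_SOURCE
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		void *buf;
 *		int fd = open("testfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);
 *
 *		posix_memalign(&buf, 4096, 4096);	// O_DIRECT wants aligned buffers
 *		memset(buf, 'x', 4096);
 *		pwrite(fd, buf, 4096, 0);		// extends i_size of the new file
 *		close(fd);
 *		free(buf);
 *		return 0;
 *	}
 *
 * Because the write extends i_size, ext4_direct_IO() puts the inode on the
 * orphan list first; if the machine crashes mid-write, recovery truncates
 * the file back to its previous size instead of exposing a bogus length.
 */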
1774
1775 /*
1776  * Pages can be marked dirty completely asynchronously from ext4's journalling
1777  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
1778  * much here because ->set_page_dirty is called under VFS locks.  The page is
1779  * not necessarily locked.
1780  *
1781  * We cannot just dirty the page and leave attached buffers clean, because the
1782  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
1783  * or jbddirty because all the journalling code will explode.
1784  *
1785  * So what we do is to mark the page "pending dirty" and next time writepage
1786  * is called, propagate that into the buffers appropriately.
1787  */
1788 static int ext4_journalled_set_page_dirty(struct page *page)
1789 {
1790         SetPageChecked(page);
1791         return __set_page_dirty_nobuffers(page);
1792 }
1793
1794 static const struct address_space_operations ext4_ordered_aops = {
1795         .readpage       = ext4_readpage,
1796         .readpages      = ext4_readpages,
1797         .writepage      = ext4_normal_writepage,
1798         .sync_page      = block_sync_page,
1799         .write_begin    = ext4_write_begin,
1800         .write_end      = ext4_ordered_write_end,
1801         .bmap           = ext4_bmap,
1802         .invalidatepage = ext4_invalidatepage,
1803         .releasepage    = ext4_releasepage,
1804         .direct_IO      = ext4_direct_IO,
1805         .migratepage    = buffer_migrate_page,
1806 };
1807
1808 static const struct address_space_operations ext4_writeback_aops = {
1809         .readpage       = ext4_readpage,
1810         .readpages      = ext4_readpages,
1811         .writepage      = ext4_normal_writepage,
1812         .sync_page      = block_sync_page,
1813         .write_begin    = ext4_write_begin,
1814         .write_end      = ext4_writeback_write_end,
1815         .bmap           = ext4_bmap,
1816         .invalidatepage = ext4_invalidatepage,
1817         .releasepage    = ext4_releasepage,
1818         .direct_IO      = ext4_direct_IO,
1819         .migratepage    = buffer_migrate_page,
1820 };
1821
1822 static const struct address_space_operations ext4_journalled_aops = {
1823         .readpage       = ext4_readpage,
1824         .readpages      = ext4_readpages,
1825         .writepage      = ext4_journalled_writepage,
1826         .sync_page      = block_sync_page,
1827         .write_begin    = ext4_write_begin,
1828         .write_end      = ext4_journalled_write_end,
1829         .set_page_dirty = ext4_journalled_set_page_dirty,
1830         .bmap           = ext4_bmap,
1831         .invalidatepage = ext4_invalidatepage,
1832         .releasepage    = ext4_releasepage,
1833 };
1834
1835 void ext4_set_aops(struct inode *inode)
1836 {
1837         if (ext4_should_order_data(inode))
1838                 inode->i_mapping->a_ops = &ext4_ordered_aops;
1839         else if (ext4_should_writeback_data(inode))
1840                 inode->i_mapping->a_ops = &ext4_writeback_aops;
1841         else
1842                 inode->i_mapping->a_ops = &ext4_journalled_aops;
1843 }
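/*
 * Which of the three tables above gets used normally follows from the data=
 * mount option (or the per-inode journal-data flag).  As an illustrative
 * example of the mapping:
 *
 *	mount -o data=ordered   <dev> <mnt>	->  ext4_ordered_aops
 *	mount -o data=writeback <dev> <mnt>	->  ext4_writeback_aops
 *	mount -o data=journal   <dev> <mnt>	->  ext4_journalled_aops
 *
 * ext4_should_order_data() and friends look at both the mount-wide DATA_FLAGS
 * and the inode's own journal-data flag, so a single file can be journalled
 * even on a data=ordered filesystem.
 */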
1844
1845 /*
1846  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
1847  * up to the end of the block which corresponds to `from'.
1848  * This is required during truncate. We need to physically zero the tail end
1849  * of that block so it doesn't yield old data if the file is later grown.
1850  */
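/*
 * A worked example, assuming 4 KiB pages and a 4 KiB block size: for
 * from = 5000, offset = 5000 & (PAGE_CACHE_SIZE - 1) = 904, so
 * length = blocksize - (offset & (blocksize - 1)) = 4096 - 904 = 3192 and
 * bytes 904..4095 of the page covering offset 5000 get zeroed.
 */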
1851 int ext4_block_truncate_page(handle_t *handle,
1852                 struct address_space *mapping, loff_t from)
1853 {
1854         ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
1855         unsigned offset = from & (PAGE_CACHE_SIZE-1);
1856         unsigned blocksize, length, pos;
1857         ext4_lblk_t iblock;
1858         struct inode *inode = mapping->host;
1859         struct buffer_head *bh;
1860         struct page *page;
1861         int err = 0;
1862
1863         page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT);
1864         if (!page)
1865                 return -EINVAL;
1866
1867         blocksize = inode->i_sb->s_blocksize;
1868         length = blocksize - (offset & (blocksize - 1));
1869         iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
1870
1871         /*
1872          * For the "nobh" option, we can only work if we don't need to
1873          * read in the page - otherwise we create buffers to do the IO.
1874          */
1875         if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
1876              ext4_should_writeback_data(inode) && PageUptodate(page)) {
1877                 zero_user(page, offset, length);
1878                 set_page_dirty(page);
1879                 goto unlock;
1880         }
1881
1882         if (!page_has_buffers(page))
1883                 create_empty_buffers(page, blocksize, 0);
1884
1885         /* Find the buffer that contains "offset" */
1886         bh = page_buffers(page);
1887         pos = blocksize;
1888         while (offset >= pos) {
1889                 bh = bh->b_this_page;
1890                 iblock++;
1891                 pos += blocksize;
1892         }
1893
1894         err = 0;
1895         if (buffer_freed(bh)) {
1896                 BUFFER_TRACE(bh, "freed: skip");
1897                 goto unlock;
1898         }
1899
1900         if (!buffer_mapped(bh)) {
1901                 BUFFER_TRACE(bh, "unmapped");
1902                 ext4_get_block(inode, iblock, bh, 0);
1903                 /* unmapped? It's a hole - nothing to do */
1904                 if (!buffer_mapped(bh)) {
1905                         BUFFER_TRACE(bh, "still unmapped");
1906                         goto unlock;
1907                 }
1908         }
1909
1910         /* Ok, it's mapped. Make sure it's up-to-date */
1911         if (PageUptodate(page))
1912                 set_buffer_uptodate(bh);
1913
1914         if (!buffer_uptodate(bh)) {
1915                 err = -EIO;
1916                 ll_rw_block(READ, 1, &bh);
1917                 wait_on_buffer(bh);
1918                 /* Uhhuh. Read error. Complain and punt. */
1919                 if (!buffer_uptodate(bh))
1920                         goto unlock;
1921         }
1922
1923         if (ext4_should_journal_data(inode)) {
1924                 BUFFER_TRACE(bh, "get write access");
1925                 err = ext4_journal_get_write_access(handle, bh);
1926                 if (err)
1927                         goto unlock;
1928         }
1929
1930         zero_user(page, offset, length);
1931
1932         BUFFER_TRACE(bh, "zeroed end of block");
1933
1934         err = 0;
1935         if (ext4_should_journal_data(inode)) {
1936                 err = ext4_journal_dirty_metadata(handle, bh);
1937         } else {
1938                 if (ext4_should_order_data(inode))
1939                         err = ext4_jbd2_file_inode(handle, inode);
1940                 mark_buffer_dirty(bh);
1941         }
1942
1943 unlock:
1944         unlock_page(page);
1945         page_cache_release(page);
1946         return err;
1947 }
1948
1949 /*
1950  * Probably it should be a library function... search for first non-zero word
1951  * or memcmp with zero_page, whatever is better for particular architecture.
1952  * Linus?
1953  */
1954 static inline int all_zeroes(__le32 *p, __le32 *q)
1955 {
1956         while (p < q)
1957                 if (*p++)
1958                         return 0;
1959         return 1;
1960 }
1961
1962 /**
1963  *      ext4_find_shared - find the indirect blocks for partial truncation.
1964  *      @inode:   inode in question
1965  *      @depth:   depth of the affected branch
1966  *      @offsets: offsets of pointers in that branch (see ext4_block_to_path)
1967  *      @chain:   place to store the pointers to partial indirect blocks
1968  *      @top:     place to store the (detached) top of the branch
1969  *
1970  *      This is a helper function used by ext4_truncate().
1971  *
1972  *      When we do truncate() we may have to clean the ends of several
1973  *      indirect blocks but leave the blocks themselves alive. Block is
1974  *      partially truncated if some data below the new i_size is referred to
1975  *      from it (and it is on the path to the first completely truncated
1976  *      data block, indeed).  We have to free the top of that path along
1977  *      with everything to the right of the path. Since no allocation
1978  *      past the truncation point is possible until ext4_truncate()
1979  *      finishes, we may safely do the latter, but top of branch may
1980  *      require special attention - pageout below the truncation point
1981  *      might try to populate it.
1982  *
1983  *      We atomically detach the top of branch from the tree, store the
1984  *      block number of its root in *@top, pointers to buffer_heads of
1985  *      partially truncated blocks - in @chain[].bh and pointers to
1986  *      their last elements that should not be removed - in
1987  *      @chain[].p. Return value is the pointer to last filled element
1988  *      of @chain.
1989  *
1990  *      The work left to caller to do the actual freeing of subtrees:
1991  *              a) free the subtree starting from *@top
1992  *              b) free the subtrees whose roots are stored in
1993  *                      (@chain[i].p+1 .. end of @chain[i].bh->b_data)
1994  *              c) free the subtrees growing from the inode past the @chain[0].
1995  *                      (no partially truncated stuff there).  */
1996
1997 static Indirect *ext4_find_shared(struct inode *inode, int depth,
1998                         ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top)
1999 {
2000         Indirect *partial, *p;
2001         int k, err;
2002
2003         *top = 0;
2004         /* Make k index the deepest non-null offset + 1 */
2005         for (k = depth; k > 1 && !offsets[k-1]; k--)
2006                 ;
2007         partial = ext4_get_branch(inode, k, offsets, chain, &err);
2008         /* Writer: pointers */
2009         if (!partial)
2010                 partial = chain + k-1;
2011         /*
2012          * If the branch acquired a continuation since we last looked at it -
2013          * fine, it should all survive and the (new) top doesn't belong to us.
2014          */
2015         if (!partial->key && *partial->p)
2016                 /* Writer: end */
2017                 goto no_top;
2018         for (p = partial; p > chain && all_zeroes((__le32 *)p->bh->b_data, p->p); p--)
2019                 ;
2020         /*
2021          * OK, we've found the last block that must survive. The rest of our
2022          * branch should be detached before unlocking. However, if that rest
2023          * of branch is all ours and does not grow immediately from the inode
2024          * it's easier to cheat and just decrement partial->p.
2025          */
2026         if (p == chain + k - 1 && p > chain) {
2027                 p->p--;
2028         } else {
2029                 *top = *p->p;
2030                 /* Nope, don't do this in ext4.  Must leave the tree intact */
2031 #if 0
2032                 *p->p = 0;
2033 #endif
2034         }
2035         /* Writer: end */
2036
2037         while (partial > p) {
2038                 brelse(partial->bh);
2039                 partial--;
2040         }
2041 no_top:
2042         return partial;
2043 }
2044
2045 /*
2046  * Zero a number of block pointers in either an inode or an indirect block.
2047  * If we restart the transaction we must again get write access to the
2048  * indirect block for further modification.
2049  *
2050  * We release `count' blocks on disk, but (last - first) may be greater
2051  * than `count' because there can be holes in there.
2052  */
2053 static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
2054                 struct buffer_head *bh, ext4_fsblk_t block_to_free,
2055                 unsigned long count, __le32 *first, __le32 *last)
2056 {
2057         __le32 *p;
2058         if (try_to_extend_transaction(handle, inode)) {
2059                 if (bh) {
2060                         BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
2061                         ext4_journal_dirty_metadata(handle, bh);
2062                 }
2063                 ext4_mark_inode_dirty(handle, inode);
2064                 ext4_journal_test_restart(handle, inode);
2065                 if (bh) {
2066                         BUFFER_TRACE(bh, "retaking write access");
2067                         ext4_journal_get_write_access(handle, bh);
2068                 }
2069         }
2070
2071         /*
2072          * Any buffers which are on the journal will be in memory. We find
2073          * them on the hash table so jbd2_journal_revoke() will run jbd2_journal_forget()
2074          * on them.  We've already detached each block from the file, so
2075          * bforget() in jbd2_journal_forget() should be safe.
2076          *
2077          * AKPM: turn on bforget in jbd2_journal_forget()!!!
2078          */
2079         for (p = first; p < last; p++) {
2080                 u32 nr = le32_to_cpu(*p);
2081                 if (nr) {
2082                         struct buffer_head *tbh;
2083
2084                         *p = 0;
2085                         tbh = sb_find_get_block(inode->i_sb, nr);
2086                         ext4_forget(handle, 0, inode, tbh, nr);
2087                 }
2088         }
2089
2090         ext4_free_blocks(handle, inode, block_to_free, count, 0);
2091 }
2092
2093 /**
2094  * ext4_free_data - free a list of data blocks
2095  * @handle:     handle for this transaction
2096  * @inode:      inode we are dealing with
2097  * @this_bh:    indirect buffer_head which contains *@first and *@last
2098  * @first:      array of block numbers
2099  * @last:       points immediately past the end of array
2100  *
2101  * We are freeing all blocks referred to from that array (numbers are stored as
2102  * little-endian 32-bit) and updating @inode->i_blocks appropriately.
2103  *
2104  * We accumulate contiguous runs of blocks to free.  Conveniently, if these
2105  * blocks are contiguous then releasing them at one time will only affect one
2106  * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
2107  * actually use a lot of journal space.
2108  *
2109  * @this_bh will be %NULL if @first and @last point into the inode's direct
2110  * block pointers.
2111  */
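/*
 * A worked example: if *first..*last hold the block numbers
 * 100, 101, 102, 0, 200, the loop below builds one run
 * (block_to_free = 100, count = 3), skips the hole, then starts a new run
 * at 200; ext4_clear_blocks() therefore runs twice rather than once per
 * block, which is the whole point of the accumulation.
 */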
2112 static void ext4_free_data(handle_t *handle, struct inode *inode,
2113                            struct buffer_head *this_bh,
2114                            __le32 *first, __le32 *last)
2115 {
2116         ext4_fsblk_t block_to_free = 0;    /* Starting block # of a run */
2117         unsigned long count = 0;            /* Number of blocks in the run */
2118         __le32 *block_to_free_p = NULL;     /* Pointer into inode/ind
2119                                                corresponding to
2120                                                block_to_free */
2121         ext4_fsblk_t nr;                    /* Current block # */
2122         __le32 *p;                          /* Pointer into inode/ind
2123                                                for current block */
2124         int err;
2125
2126         if (this_bh) {                          /* For indirect block */
2127                 BUFFER_TRACE(this_bh, "get_write_access");
2128                 err = ext4_journal_get_write_access(handle, this_bh);
2129                 /* Important: if we can't update the indirect pointers
2130                  * to the blocks, we can't free them. */
2131                 if (err)
2132                         return;
2133         }
2134
2135         for (p = first; p < last; p++) {
2136                 nr = le32_to_cpu(*p);
2137                 if (nr) {
2138                         /* accumulate blocks to free if they're contiguous */
2139                         if (count == 0) {
2140                                 block_to_free = nr;
2141                                 block_to_free_p = p;
2142                                 count = 1;
2143                         } else if (nr == block_to_free + count) {
2144                                 count++;
2145                         } else {
2146                                 ext4_clear_blocks(handle, inode, this_bh,
2147                                                   block_to_free,
2148                                                   count, block_to_free_p, p);
2149                                 block_to_free = nr;
2150                                 block_to_free_p = p;
2151                                 count = 1;
2152                         }
2153                 }
2154         }
2155
2156         if (count > 0)
2157                 ext4_clear_blocks(handle, inode, this_bh, block_to_free,
2158                                   count, block_to_free_p, p);
2159
2160         if (this_bh) {
2161                 BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata");
2162
2163                 /*
2164                  * The buffer head should have an attached journal head at this
2165                  * point. However, if the data is corrupted and an indirect
2166                  * block pointed to itself, it would have been detached when
2167                  * the block was cleared. Check for this instead of OOPSing.
2168                  */
2169                 if (bh2jh(this_bh))
2170                         ext4_journal_dirty_metadata(handle, this_bh);
2171                 else
2172                         ext4_error(inode->i_sb, __func__,
2173                                    "circular indirect block detected, "
2174                                    "inode=%lu, block=%llu",
2175                                    inode->i_ino,
2176                                    (unsigned long long) this_bh->b_blocknr);
2177         }
2178 }
2179
2180 /**
2181  *      ext4_free_branches - free an array of branches
2182  *      @handle: JBD handle for this transaction
2183  *      @inode: inode we are dealing with
2184  *      @parent_bh: the buffer_head which contains *@first and *@last
2185  *      @first: array of block numbers
2186  *      @last:  pointer immediately past the end of array
2187  *      @depth: depth of the branches to free
2188  *
2189  *      We are freeing all blocks referred to from these branches (numbers are
2190  *      stored as little-endian 32-bit) and updating @inode->i_blocks
2191  *      appropriately.
2192  */
2193 static void ext4_free_branches(handle_t *handle, struct inode *inode,
2194                                struct buffer_head *parent_bh,
2195                                __le32 *first, __le32 *last, int depth)
2196 {
2197         ext4_fsblk_t nr;
2198         __le32 *p;
2199
2200         if (is_handle_aborted(handle))
2201                 return;
2202
2203         if (depth--) {
2204                 struct buffer_head *bh;
2205                 int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
2206                 p = last;
2207                 while (--p >= first) {
2208                         nr = le32_to_cpu(*p);
2209                         if (!nr)
2210                                 continue;               /* A hole */
2211
2212                         /* Go read the buffer for the next level down */
2213                         bh = sb_bread(inode->i_sb, nr);
2214
2215                         /*
2216                          * A read failure? Report error and clear slot
2217                          * (should be rare).
2218                          */
2219                         if (!bh) {
2220                                 ext4_error(inode->i_sb, "ext4_free_branches",
2221                                            "Read failure, inode=%lu, block=%llu",
2222                                            inode->i_ino, nr);
2223                                 continue;
2224                         }
2225
2226                         /* This zaps the entire block.  Bottom up. */
2227                         BUFFER_TRACE(bh, "free child branches");
2228                         ext4_free_branches(handle, inode, bh,
2229                                            (__le32*)bh->b_data,
2230                                            (__le32*)bh->b_data + addr_per_block,
2231                                            depth);
2232
2233                         /*
2234                          * We've probably journalled the indirect block several
2235                          * times during the truncate.  But it's no longer
2236                          * needed and we now drop it from the transaction via
2237                          * jbd2_journal_revoke().
2238                          *
2239                          * That's easy if it's exclusively part of this
2240                          * transaction.  But if it's part of the committing
2241                          * transaction then jbd2_journal_forget() will simply
2242                          * brelse() it.  That means that if the underlying
2243                          * block is reallocated in ext4_get_block(),
2244                          * unmap_underlying_metadata() will find this block
2245                          * and will try to get rid of it.  damn, damn.
2246                          *
2247                          * If this block has already been committed to the
2248                          * journal, a revoke record will be written.  And
2249                          * revoke records must be emitted *before* clearing
2250                          * this block's bit in the bitmaps.
2251                          */
2252                         ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
2253
2254                         /*
2255                          * Everything below this pointer has been
2256                          * released.  Now let this top-of-subtree go.
2257                          *
2258                          * We want the freeing of this indirect block to be
2259                          * atomic in the journal with the updating of the
2260                          * bitmap block which owns it.  So make some room in
2261                          * the journal.
2262                          *
2263                          * We zero the parent pointer *after* freeing its
2264                          * pointee in the bitmaps, so if extend_transaction()
2265                          * for some reason fails to put the bitmap changes and
2266                          * the release into the same transaction, recovery
2267                          * will merely complain about releasing a free block,
2268                          * rather than leaking blocks.
2269                          */
2270                         if (is_handle_aborted(handle))
2271                                 return;
2272                         if (try_to_extend_transaction(handle, inode)) {
2273                                 ext4_mark_inode_dirty(handle, inode);
2274                                 ext4_journal_test_restart(handle, inode);
2275                         }
2276
2277                         ext4_free_blocks(handle, inode, nr, 1, 1);
2278
2279                         if (parent_bh) {
2280                                 /*
2281                                  * The block which we have just freed is
2282                                  * pointed to by an indirect block: journal it
2283                                  */
2284                                 BUFFER_TRACE(parent_bh, "get_write_access");
2285                                 if (!ext4_journal_get_write_access(handle,
2286                                                                    parent_bh)){
2287                                         *p = 0;
2288                                         BUFFER_TRACE(parent_bh,
2289                                         "call ext4_journal_dirty_metadata");
2290                                         ext4_journal_dirty_metadata(handle,
2291                                                                     parent_bh);
2292                                 }
2293                         }
2294                 }
2295         } else {
2296                 /* We have reached the bottom of the tree. */
2297                 BUFFER_TRACE(parent_bh, "free data blocks");
2298                 ext4_free_data(handle, inode, parent_bh, first, last);
2299         }
2300 }
2301
2302 int ext4_can_truncate(struct inode *inode)
2303 {
2304         if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2305                 return 0;
2306         if (S_ISREG(inode->i_mode))
2307                 return 1;
2308         if (S_ISDIR(inode->i_mode))
2309                 return 1;
2310         if (S_ISLNK(inode->i_mode))
2311                 return !ext4_inode_is_fast_symlink(inode);
2312         return 0;
2313 }
2314
2315 /*
2316  * ext4_truncate()
2317  *
2318  * We block out ext4_get_block() block instantiations across the entire
2319  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
2320  * simultaneously on behalf of the same inode.
2321  *
2322  * As we work through the truncate and commit bits of it to the journal there
2323  * is one core, guiding principle: the file's tree must always be consistent on
2324  * disk.  We must be able to restart the truncate after a crash.
2325  *
2326  * The file's tree may be transiently inconsistent in memory (although it
2327  * probably isn't), but whenever we close off and commit a journal transaction,
2328  * the contents of (the filesystem + the journal) must be consistent and
2329  * restartable.  It's pretty simple, really: bottom up, right to left (although
2330  * left-to-right works OK too).
2331  *
2332  * Note that at recovery time, journal replay occurs *before* the restart of
2333  * truncate against the orphan inode list.
2334  *
2335  * The committed inode has the new, desired i_size (which is the same as
2336  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
2337  * that this inode's truncate did not complete and it will again call
2338  * ext4_truncate() to have another go.  So there will be instantiated blocks
2339  * to the right of the truncation point in a crashed ext4 filesystem.  But
2340  * that's fine - as long as they are linked from the inode, the post-crash
2341  * ext4_truncate() run will find them and release them.
2342  */
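/*
 * For illustration, the multi-transaction case described above is what an
 * ordinary user-space truncate of a large indirect-mapped file exercises
 * (a minimal sketch, error handling omitted; the path and length are just
 * examples):
 *
 *	#include <unistd.h>
 *	#include <sys/types.h>
 *
 *	int shrink(const char *path)
 *	{
 *		return truncate(path, 4096);	// cut the file down to one block
 *	}
 *
 * If the machine crashes before the last transaction commits, the inode is
 * still on the orphan list and ext4_orphan_cleanup() re-runs ext4_truncate()
 * during the next mount to finish the job.
 */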
2343 void ext4_truncate(struct inode *inode)
2344 {
2345         handle_t *handle;
2346         struct ext4_inode_info *ei = EXT4_I(inode);
2347         __le32 *i_data = ei->i_data;
2348         int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
2349         struct address_space *mapping = inode->i_mapping;
2350         ext4_lblk_t offsets[4];
2351         Indirect chain[4];
2352         Indirect *partial;
2353         __le32 nr = 0;
2354         int n;
2355         ext4_lblk_t last_block;
2356         unsigned blocksize = inode->i_sb->s_blocksize;
2357
2358         if (!ext4_can_truncate(inode))
2359                 return;
2360
2361         if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
2362                 ext4_ext_truncate(inode);
2363                 return;
2364         }
2365
2366         handle = start_transaction(inode);
2367         if (IS_ERR(handle))
2368                 return;         /* AKPM: return what? */
2369
2370         last_block = (inode->i_size + blocksize-1)
2371                                         >> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
2372
2373         if (inode->i_size & (blocksize - 1))
2374                 if (ext4_block_truncate_page(handle, mapping, inode->i_size))
2375                         goto out_stop;
2376
2377         n = ext4_block_to_path(inode, last_block, offsets, NULL);
2378         if (n == 0)
2379                 goto out_stop;  /* error */
2380
2381         /*
2382          * OK.  This truncate is going to happen.  We add the inode to the
2383          * orphan list, so that if this truncate spans multiple transactions,
2384          * and we crash, we will resume the truncate when the filesystem
2385          * recovers.  It also marks the inode dirty, to catch the new size.
2386          *
2387          * Implication: the file must always be in a sane, consistent
2388          * truncatable state while each transaction commits.
2389          */
2390         if (ext4_orphan_add(handle, inode))
2391                 goto out_stop;
2392
2393         /*
2394          * The orphan list entry will now protect us from any crash which
2395          * occurs before the truncate completes, so it is now safe to propagate
2396          * the new, shorter inode size (held for now in i_size) into the
2397          * on-disk inode. We do this via i_disksize, which is the value which
2398          * ext4 *really* writes onto the disk inode.
2399          */
2400         ei->i_disksize = inode->i_size;
2401
2402         /*
2403          * From here we block out all ext4_get_block() callers who want to
2404          * modify the block allocation tree.
2405          */
2406         down_write(&ei->i_data_sem);
2407
2408         if (n == 1) {           /* direct blocks */
2409                 ext4_free_data(handle, inode, NULL, i_data+offsets[0],
2410                                i_data + EXT4_NDIR_BLOCKS);
2411                 goto do_indirects;
2412         }
2413
2414         partial = ext4_find_shared(inode, n, offsets, chain, &nr);
2415         /* Kill the top of shared branch (not detached) */
2416         if (nr) {
2417                 if (partial == chain) {
2418                         /* Shared branch grows from the inode */
2419                         ext4_free_branches(handle, inode, NULL,
2420                                            &nr, &nr+1, (chain+n-1) - partial);
2421                         *partial->p = 0;
2422                         /*
2423                          * We mark the inode dirty prior to restart,
2424                          * and prior to stop.  No need for it here.
2425                          */
2426                 } else {
2427                         /* Shared branch grows from an indirect block */
2428                         BUFFER_TRACE(partial->bh, "get_write_access");
2429                         ext4_free_branches(handle, inode, partial->bh,
2430                                         partial->p,
2431                                         partial->p+1, (chain+n-1) - partial);
2432                 }
2433         }
2434         /* Clear the ends of indirect blocks on the shared branch */
2435         while (partial > chain) {
2436                 ext4_free_branches(handle, inode, partial->bh, partial->p + 1,
2437                                    (__le32*)partial->bh->b_data+addr_per_block,
2438                                    (chain+n-1) - partial);
2439                 BUFFER_TRACE(partial->bh, "call brelse");
2440                 brelse(partial->bh);
2441                 partial--;
2442         }
2443 do_indirects:
2444         /* Kill the remaining (whole) subtrees */
2445         switch (offsets[0]) {
2446         default:
2447                 nr = i_data[EXT4_IND_BLOCK];
2448                 if (nr) {
2449                         ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
2450                         i_data[EXT4_IND_BLOCK] = 0;
2451                 }
2452         case EXT4_IND_BLOCK:
2453                 nr = i_data[EXT4_DIND_BLOCK];
2454                 if (nr) {
2455                         ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
2456                         i_data[EXT4_DIND_BLOCK] = 0;
2457                 }
2458         case EXT4_DIND_BLOCK:
2459                 nr = i_data[EXT4_TIND_BLOCK];
2460                 if (nr) {
2461                         ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
2462                         i_data[EXT4_TIND_BLOCK] = 0;
2463                 }
2464         case EXT4_TIND_BLOCK:
2465                 ;
2466         }
2467
2468         ext4_discard_reservation(inode);
2469
2470         up_write(&ei->i_data_sem);
2471         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
2472         ext4_mark_inode_dirty(handle, inode);
2473
2474         /*
2475          * In a multi-transaction truncate, we only make the final transaction
2476          * synchronous.
2477          */
2478         if (IS_SYNC(inode))
2479                 handle->h_sync = 1;
2480 out_stop:
2481         /*
2482          * If this was a simple ftruncate(), and the file will remain alive
2483          * then we need to clear up the orphan record which we created above.
2484          * However, if this was a real unlink then we were called by
2485          * ext4_delete_inode(), and we allow that function to clean up the
2486          * orphan info for us.
2487          */
2488         if (inode->i_nlink)
2489                 ext4_orphan_del(handle, inode);
2490
2491         ext4_journal_stop(handle);
2492 }
2493
2494 static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
2495                 unsigned long ino, struct ext4_iloc *iloc)
2496 {
2497         ext4_group_t block_group;
2498         unsigned long offset;
2499         ext4_fsblk_t block;
2500         struct ext4_group_desc *gdp;
2501
2502         if (!ext4_valid_inum(sb, ino)) {
2503                 /*
2504                  * This error is already checked for in namei.c unless we are
2505                  * looking at an NFS filehandle, in which case no error
2506                  * report is needed
2507                  */
2508                 return 0;
2509         }
2510
2511         block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
2512         gdp = ext4_get_group_desc(sb, block_group, NULL);
2513         if (!gdp)
2514                 return 0;
2515
2516         /*
2517          * Figure out the offset within the block group inode table
2518          */
2519         offset = ((ino - 1) % EXT4_INODES_PER_GROUP(sb)) *
2520                 EXT4_INODE_SIZE(sb);
2521         block = ext4_inode_table(sb, gdp) +
2522                 (offset >> EXT4_BLOCK_SIZE_BITS(sb));
2523
2524         iloc->block_group = block_group;
2525         iloc->offset = offset & (EXT4_BLOCK_SIZE(sb) - 1);
2526         return block;
2527 }
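/*
 * A worked example, assuming 8192 inodes per group, 256-byte inodes and
 * 4 KiB blocks: for ino = 12000, block_group = 11999 / 8192 = 1 and the byte
 * offset into that group's inode table is (11999 % 8192) * 256 = 974592,
 * which the code above turns into block ext4_inode_table(sb, gdp) + 237 with
 * iloc->offset = 974592 % 4096 = 3840.
 */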
2528
2529 /*
2530  * ext4_get_inode_loc returns with an extra refcount against the inode's
2531  * underlying buffer_head on success. If 'in_mem' is true, we have all
2532  * data in memory that is needed to recreate the on-disk version of this
2533  * inode.
2534  */
2535 static int __ext4_get_inode_loc(struct inode *inode,
2536                                 struct ext4_iloc *iloc, int in_mem)
2537 {
2538         ext4_fsblk_t block;
2539         struct buffer_head *bh;
2540
2541         block = ext4_get_inode_block(inode->i_sb, inode->i_ino, iloc);
2542         if (!block)
2543                 return -EIO;
2544
2545         bh = sb_getblk(inode->i_sb, block);
2546         if (!bh) {
2547                 ext4_error(inode->i_sb, "ext4_get_inode_loc",
2548                                 "unable to read inode block - "
2549                                 "inode=%lu, block=%llu",
2550                                  inode->i_ino, block);
2551                 return -EIO;
2552         }
2553         if (!buffer_uptodate(bh)) {
2554                 lock_buffer(bh);
2555                 if (buffer_uptodate(bh)) {
2556                         /* someone brought it uptodate while we waited */
2557                         unlock_buffer(bh);
2558                         goto has_buffer;
2559                 }
2560
2561                 /*
2562                  * If we have all information of the inode in memory and this
2563                  * is the only valid inode in the block, we need not read the
2564                  * block.
2565                  */
2566                 if (in_mem) {
2567                         struct buffer_head *bitmap_bh;
2568                         struct ext4_group_desc *desc;
2569                         int inodes_per_buffer;
2570                         int inode_offset, i;
2571                         ext4_group_t block_group;
2572                         int start;
2573
2574                         block_group = (inode->i_ino - 1) /
2575                                         EXT4_INODES_PER_GROUP(inode->i_sb);
2576                         inodes_per_buffer = bh->b_size /
2577                                 EXT4_INODE_SIZE(inode->i_sb);
2578                         inode_offset = ((inode->i_ino - 1) %
2579                                         EXT4_INODES_PER_GROUP(inode->i_sb));
2580                         start = inode_offset & ~(inodes_per_buffer - 1);
2581
2582                         /* Is the inode bitmap in cache? */
2583                         desc = ext4_get_group_desc(inode->i_sb,
2584                                                 block_group, NULL);
2585                         if (!desc)
2586                                 goto make_io;
2587
2588                         bitmap_bh = sb_getblk(inode->i_sb,
2589                                 ext4_inode_bitmap(inode->i_sb, desc));
2590                         if (!bitmap_bh)
2591                                 goto make_io;
2592
2593                         /*
2594                          * If the inode bitmap isn't in cache then the
2595                          * optimisation may end up performing two reads instead
2596                          * of one, so skip it.
2597                          */
2598                         if (!buffer_uptodate(bitmap_bh)) {
2599                                 brelse(bitmap_bh);
2600                                 goto make_io;
2601                         }
2602                         for (i = start; i < start + inodes_per_buffer; i++) {
2603                                 if (i == inode_offset)
2604                                         continue;
2605                                 if (ext4_test_bit(i, bitmap_bh->b_data))
2606                                         break;
2607                         }
2608                         brelse(bitmap_bh);
2609                         if (i == start + inodes_per_buffer) {
2610                                 /* all other inodes are free, so skip I/O */
2611                                 memset(bh->b_data, 0, bh->b_size);
2612                                 set_buffer_uptodate(bh);
2613                                 unlock_buffer(bh);
2614                                 goto has_buffer;
2615                         }
2616                 }
2617
2618 make_io:
2619                 /*
2620                  * There are other valid inodes in the buffer, this inode
2621                  * has in-inode xattrs, or we don't have this inode in memory.
2622                  * Read the block from disk.
2623                  */
2624                 get_bh(bh);
2625                 bh->b_end_io = end_buffer_read_sync;
2626                 submit_bh(READ_META, bh);
2627                 wait_on_buffer(bh);
2628                 if (!buffer_uptodate(bh)) {
2629                         ext4_error(inode->i_sb, "ext4_get_inode_loc",
2630                                         "unable to read inode block - "
2631                                         "inode=%lu, block=%llu",
2632                                         inode->i_ino, block);
2633                         brelse(bh);
2634                         return -EIO;
2635                 }
2636         }
2637 has_buffer:
2638         iloc->bh = bh;
2639         return 0;
2640 }
2641
2642 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
2643 {
2644         /* We have all inode data except xattrs in memory here. */
2645         return __ext4_get_inode_loc(inode, iloc,
2646                 !(EXT4_I(inode)->i_state & EXT4_STATE_XATTR));
2647 }
2648
2649 void ext4_set_inode_flags(struct inode *inode)
2650 {
2651         unsigned int flags = EXT4_I(inode)->i_flags;
2652
2653         inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
2654         if (flags & EXT4_SYNC_FL)
2655                 inode->i_flags |= S_SYNC;
2656         if (flags & EXT4_APPEND_FL)
2657                 inode->i_flags |= S_APPEND;
2658         if (flags & EXT4_IMMUTABLE_FL)
2659                 inode->i_flags |= S_IMMUTABLE;
2660         if (flags & EXT4_NOATIME_FL)
2661                 inode->i_flags |= S_NOATIME;
2662         if (flags & EXT4_DIRSYNC_FL)
2663                 inode->i_flags |= S_DIRSYNC;
2664 }
2665
2666 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
2667 void ext4_get_inode_flags(struct ext4_inode_info *ei)
2668 {
2669         unsigned int flags = ei->vfs_inode.i_flags;
2670
2671         ei->i_flags &= ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
2672                         EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|EXT4_DIRSYNC_FL);
2673         if (flags & S_SYNC)
2674                 ei->i_flags |= EXT4_SYNC_FL;
2675         if (flags & S_APPEND)
2676                 ei->i_flags |= EXT4_APPEND_FL;
2677         if (flags & S_IMMUTABLE)
2678                 ei->i_flags |= EXT4_IMMUTABLE_FL;
2679         if (flags & S_NOATIME)
2680                 ei->i_flags |= EXT4_NOATIME_FL;
2681         if (flags & S_DIRSYNC)
2682                 ei->i_flags |= EXT4_DIRSYNC_FL;
2683 }
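
/*
 * Read the i_blocks count out of the raw on-disk inode.  With the
 * HUGE_FILE ro-compat feature the count lives in a combined 48 bit
 * field, and when EXT4_HUGE_FILE_FL is set it is kept in file system
 * block units, so it is converted back to 512 byte units here.
 */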
2684 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
2685                                         struct ext4_inode_info *ei)
2686 {
2687         blkcnt_t i_blocks;
2688         struct inode *inode = &(ei->vfs_inode);
2689         struct super_block *sb = inode->i_sb;
2690
2691         if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
2692                                 EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
2693                 /* we are using the combined 48 bit field */
2694                 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
2695                                         le32_to_cpu(raw_inode->i_blocks_lo);
2696                 if (ei->i_flags & EXT4_HUGE_FILE_FL) {
2697                         /* i_blocks is stored in units of the file system block size */
2698                         return i_blocks << (inode->i_blkbits - 9);
2699                 } else {
2700                         return i_blocks;
2701                 }
2702         } else {
2703                 return le32_to_cpu(raw_inode->i_blocks_lo);
2704         }
2705 }
2706
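/*
 * Read the inode @ino from disk and set up a new in-core inode for it.
 * Returns the inode on success, or an ERR_PTR() on failure (for example
 * -ESTALE for a deleted inode or -EIO when the inode block cannot be
 * read).
 */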
2707 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
2708 {
2709         struct ext4_iloc iloc;
2710         struct ext4_inode *raw_inode;
2711         struct ext4_inode_info *ei;
2712         struct buffer_head *bh;
2713         struct inode *inode;
2714         long ret;
2715         int block;
2716
2717         inode = iget_locked(sb, ino);
2718         if (!inode)
2719                 return ERR_PTR(-ENOMEM);
2720         if (!(inode->i_state & I_NEW))
2721                 return inode;
2722
2723         ei = EXT4_I(inode);
2724 #ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
2725         ei->i_acl = EXT4_ACL_NOT_CACHED;
2726         ei->i_default_acl = EXT4_ACL_NOT_CACHED;
2727 #endif
2728         ei->i_block_alloc_info = NULL;
2729
2730         ret = __ext4_get_inode_loc(inode, &iloc, 0);
2731         if (ret < 0)
2732                 goto bad_inode;
2733         bh = iloc.bh;
2734         raw_inode = ext4_raw_inode(&iloc);
2735         inode->i_mode = le16_to_cpu(raw_inode->i_mode);
2736         inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
2737         inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
2738         if (!(test_opt(inode->i_sb, NO_UID32))) {
2739                 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
2740                 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
2741         }
2742         inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
2743
2744         ei->i_state = 0;
2745         ei->i_dir_start_lookup = 0;
2746         ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
2747         /* We now have enough fields to check if the inode was active or not.
2748          * This is needed because nfsd might try to access dead inodes;
2749          * the test is the same one that e2fsck uses.
2750          * NeilBrown 1999oct15
2751          */
2752         if (inode->i_nlink == 0) {
2753                 if (inode->i_mode == 0 ||
2754                     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
2755                         /* this inode is deleted */
2756                         brelse(bh);
2757                         ret = -ESTALE;
2758                         goto bad_inode;
2759                 }
2760                 /* The only unlinked inodes we let through here have
2761                  * valid i_mode and are being read by the orphan
2762                  * recovery code: that's fine, we're about to complete
2763                  * the process of deleting those. */
2764         }
2765         ei->i_flags = le32_to_cpu(raw_inode->i_flags);
2766         inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
2767         ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
2768         if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
2769             cpu_to_le32(EXT4_OS_HURD)) {
2770                 ei->i_file_acl |=
2771                         ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
2772         }
2773         inode->i_size = ext4_isize(raw_inode);
2774         ei->i_disksize = inode->i_size;
2775         inode->i_generation = le32_to_cpu(raw_inode->i_generation);
2776         ei->i_block_group = iloc.block_group;
2777         /*
2778          * NOTE! The in-memory inode i_data array is in little-endian order
2779          * even on big-endian machines: we do NOT byteswap the block numbers!
2780          */
2781         for (block = 0; block < EXT4_N_BLOCKS; block++)
2782                 ei->i_data[block] = raw_inode->i_block[block];
2783         INIT_LIST_HEAD(&ei->i_orphan);
2784
2785         if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
2786                 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
2787                 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
2788                     EXT4_INODE_SIZE(inode->i_sb)) {
2789                         brelse(bh);
2790                         ret = -EIO;
2791                         goto bad_inode;
2792                 }
2793                 if (ei->i_extra_isize == 0) {
2794                         /* The extra space is currently unused. Use it. */
2795                         ei->i_extra_isize = sizeof(struct ext4_inode) -
2796                                             EXT4_GOOD_OLD_INODE_SIZE;
2797                 } else {
2798                         __le32 *magic = (void *)raw_inode +
2799                                         EXT4_GOOD_OLD_INODE_SIZE +
2800                                         ei->i_extra_isize;
2801                         if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
2802                                  ei->i_state |= EXT4_STATE_XATTR;
2803                 }
2804         } else
2805                 ei->i_extra_isize = 0;
2806
2807         EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
2808         EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
2809         EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
2810         EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
2811
2812         inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
2813         if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
2814                 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
2815                         inode->i_version |=
2816                         (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
2817         }
2818
2819         if (S_ISREG(inode->i_mode)) {
2820                 inode->i_op = &ext4_file_inode_operations;
2821                 inode->i_fop = &ext4_file_operations;
2822                 ext4_set_aops(inode);
2823         } else if (S_ISDIR(inode->i_mode)) {
2824                 inode->i_op = &ext4_dir_inode_operations;
2825                 inode->i_fop = &ext4_dir_operations;
2826         } else if (S_ISLNK(inode->i_mode)) {
2827                 if (ext4_inode_is_fast_symlink(inode))
2828                         inode->i_op = &ext4_fast_symlink_inode_operations;
2829                 else {
2830                         inode->i_op = &ext4_symlink_inode_operations;
2831                         ext4_set_aops(inode);
2832                 }
2833         } else {
2834                 inode->i_op = &ext4_special_inode_operations;
2835                 if (raw_inode->i_block[0])
2836                         init_special_inode(inode, inode->i_mode,
2837                            old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
2838                 else
2839                         init_special_inode(inode, inode->i_mode,
2840                            new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
2841         }
2842         brelse(iloc.bh);
2843         ext4_set_inode_flags(inode);
2844         unlock_new_inode(inode);
2845         return inode;
2846
2847 bad_inode:
2848         iget_failed(inode);
2849         return ERR_PTR(ret);
2850 }
2851
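/*
 * The write-side counterpart of ext4_inode_blocks(): encode the in-core
 * i_blocks count into the split i_blocks_lo/i_blocks_high on-disk fields.
 * A count that does not fit in 48 bits of 512 byte units is stored in
 * file system block units instead, with EXT4_HUGE_FILE_FL set on the
 * inode and the HUGE_FILE ro-compat feature enabled on the superblock.
 */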
2852 static int ext4_inode_blocks_set(handle_t *handle,
2853                                 struct ext4_inode *raw_inode,
2854                                 struct ext4_inode_info *ei)
2855 {
2856         struct inode *inode = &(ei->vfs_inode);
2857         u64 i_blocks = inode->i_blocks;
2858         struct super_block *sb = inode->i_sb;
2859         int err = 0;
2860
2861         if (i_blocks <= ~0U) {
2862                 /*
2863                  * i_blocks can be represented in a 32 bit variable
2864                  * as a multiple of 512 bytes
2865                  */
2866                 raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
2867                 raw_inode->i_blocks_high = 0;
2868                 ei->i_flags &= ~EXT4_HUGE_FILE_FL;
2869         } else if (i_blocks <= 0xffffffffffffULL) {
2870                 /*
2871                  * i_blocks can be represented in a 48 bit variable
2872                  * as a multiple of 512 bytes
2873                  */
2874                 err = ext4_update_rocompat_feature(handle, sb,
2875                                             EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
2876                 if (err)
2877                         goto err_out;
2878                 /* i_blocks is stored in the split 48 bit fields */
2879                 raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
2880                 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
2881                 ei->i_flags &= ~EXT4_HUGE_FILE_FL;
2882         } else {
2883                 /*
2884                  * i_blocks should be represented in a 48 bit variable
2885                  * as a multiple of the file system block size
2886                  */
2887                 err = ext4_update_rocompat_feature(handle, sb,
2888                                             EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
2889                 if (err)
2890                         goto err_out;
2891                 ei->i_flags |= EXT4_HUGE_FILE_FL;
2892                 /* i_blocks is stored in units of the file system block size */
2893                 i_blocks = i_blocks >> (inode->i_blkbits - 9);
2894                 raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
2895                 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
2896         }
2897 err_out:
2898         return err;
2899 }
2900
2901 /*
2902  * Post the struct inode info into an on-disk inode location in the
2903  * buffer-cache.  This gobbles the caller's reference to the
2904  * buffer_head in the inode location struct.
2905  *
2906  * The caller must have write access to iloc->bh.
2907  */
2908 static int ext4_do_update_inode(handle_t *handle,
2909                                 struct inode *inode,
2910                                 struct ext4_iloc *iloc)
2911 {
2912         struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
2913         struct ext4_inode_info *ei = EXT4_I(inode);
2914         struct buffer_head *bh = iloc->bh;
2915         int err = 0, rc, block;
2916
2917         /* For fields not tracked in the in-memory inode,
2918          * initialise them to zero for new inodes. */
2919         if (ei->i_state & EXT4_STATE_NEW)
2920                 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
2921
2922         ext4_get_inode_flags(ei);
2923         raw_inode->i_mode = cpu_to_le16(inode->i_mode);
2924         if (!(test_opt(inode->i_sb, NO_UID32))) {
2925                 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
2926                 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
2927 /*
2928  * Fix up interoperability with old kernels. Otherwise, old inodes get
2929  * re-used with the upper 16 bits of the uid/gid intact.
2930  */
2931                 if (!ei->i_dtime) {
2932                         raw_inode->i_uid_high =
2933                                 cpu_to_le16(high_16_bits(inode->i_uid));
2934                         raw_inode->i_gid_high =
2935                                 cpu_to_le16(high_16_bits(inode->i_gid));
2936                 } else {
2937                         raw_inode->i_uid_high = 0;
2938                         raw_inode->i_gid_high = 0;
2939                 }
2940         } else {
2941                 raw_inode->i_uid_low =
2942                         cpu_to_le16(fs_high2lowuid(inode->i_uid));
2943                 raw_inode->i_gid_low =
2944                         cpu_to_le16(fs_high2lowgid(inode->i_gid));
2945                 raw_inode->i_uid_high = 0;
2946                 raw_inode->i_gid_high = 0;
2947         }
2948         raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
2949
2950         EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
2951         EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
2952         EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
2953         EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
2954
2955         if (ext4_inode_blocks_set(handle, raw_inode, ei))
2956                 goto out_brelse;
2957         raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
2958         /* clear the migrate flag in the raw_inode */
2959         raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE);
2960         if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
2961             cpu_to_le32(EXT4_OS_HURD))
2962                 raw_inode->i_file_acl_high =
2963                         cpu_to_le16(ei->i_file_acl >> 32);
2964         raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
2965         ext4_isize_set(raw_inode, ei->i_disksize);
2966         if (ei->i_disksize > 0x7fffffffULL) {
2967                 struct super_block *sb = inode->i_sb;
2968                 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
2969                                 EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
2970                                 EXT4_SB(sb)->s_es->s_rev_level ==
2971                                 cpu_to_le32(EXT4_GOOD_OLD_REV)) {
2972                         /* If this is the first large file
2973                          * created, add a flag to the superblock.
2974                          */
2975                         err = ext4_journal_get_write_access(handle,
2976                                         EXT4_SB(sb)->s_sbh);
2977                         if (err)
2978                                 goto out_brelse;
2979                         ext4_update_dynamic_rev(sb);
2980                         EXT4_SET_RO_COMPAT_FEATURE(sb,
2981                                         EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
2982                         sb->s_dirt = 1;
2983                         handle->h_sync = 1;
2984                         err = ext4_journal_dirty_metadata(handle,
2985                                         EXT4_SB(sb)->s_sbh);
2986                 }
2987         }
2988         raw_inode->i_generation = cpu_to_le32(inode->i_generation);
2989         if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
2990                 if (old_valid_dev(inode->i_rdev)) {
2991                         raw_inode->i_block[0] =
2992                                 cpu_to_le32(old_encode_dev(inode->i_rdev));
2993                         raw_inode->i_block[1] = 0;
2994                 } else {
2995                         raw_inode->i_block[0] = 0;
2996                         raw_inode->i_block[1] =
2997                                 cpu_to_le32(new_encode_dev(inode->i_rdev));
2998                         raw_inode->i_block[2] = 0;
2999                 }
3000         } else for (block = 0; block < EXT4_N_BLOCKS; block++)
3001                 raw_inode->i_block[block] = ei->i_data[block];
3002
3003         raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
3004         if (ei->i_extra_isize) {
3005                 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
3006                         raw_inode->i_version_hi =
3007                         cpu_to_le32(inode->i_version >> 32);
3008                 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
3009         }
3010
3011
3012         BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
3013         rc = ext4_journal_dirty_metadata(handle, bh);
3014         if (!err)
3015                 err = rc;
3016         ei->i_state &= ~EXT4_STATE_NEW;
3017
3018 out_brelse:
3019         brelse(bh);
3020         ext4_std_error(inode->i_sb, err);
3021         return err;
3022 }
3023
3024 /*
3025  * ext4_write_inode()
3026  *
3027  * We are called from a few places:
3028  *
3029  * - Within generic_file_write() for O_SYNC files.
3030  *   Here, there will be no transaction running. We wait for any running
3031  *   transaction to commit.
3032  *
3033  * - Within sys_sync(), kupdate and such.
3034  *   We wait on commit, if told to.
3035  *
3036  * - Within prune_icache() (PF_MEMALLOC == true)
3037  *   Here we simply return.  We can't afford to block kswapd on the
3038  *   journal commit.
3039  *
3040  * In all cases it is actually safe for us to return without doing anything,
3041  * because the inode has been copied into a raw inode buffer in
3042  * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
3043  * knfsd.
3044  *
3045  * Note that we are absolutely dependent upon all inode dirtiers doing the
3046  * right thing: they *must* call mark_inode_dirty() after dirtying info in
3047  * which we are interested.
3048  *
3049  * It would be a bug for them to not do this.  The code:
3050  *
3051  *      mark_inode_dirty(inode)
3052  *      stuff();
3053  *      inode->i_size = expr;
3054  *
3055  * is in error because a kswapd-driven write_inode() could occur while
3056  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
3057  * will no longer be on the superblock's dirty inode list.
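 *
 * For illustration, the safe ordering is:
 *
 *      stuff();
 *      inode->i_size = expr;
 *      mark_inode_dirty(inode);
 *
 * so the inode is (re)dirtied only after every in-core field it covers
 * has been updated.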
3058  */
3059 int ext4_write_inode(struct inode *inode, int wait)
3060 {
3061         if (current->flags & PF_MEMALLOC)
3062                 return 0;
3063
3064         if (ext4_journal_current_handle()) {
3065                 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
3066                 dump_stack();
3067                 return -EIO;
3068         }
3069
3070         if (!wait)
3071                 return 0;
3072
3073         return ext4_force_commit(inode->i_sb);
3074 }
3075
3076 /*
3077  * ext4_setattr()
3078  *
3079  * Called from notify_change.
3080  *
3081  * We want to trap VFS attempts to truncate the file as soon as
3082  * possible.  In particular, we want to make sure that when the VFS
3083  * shrinks i_size, we put the inode on the orphan list and modify
3084  * i_disksize immediately, so that during the subsequent flushing of
3085  * dirty pages and freeing of disk blocks, we can guarantee that any
3086  * commit will leave the blocks being flushed in an unused state on
3087  * disk.  (On recovery, the inode will get truncated and the blocks will
3088  * be freed, so we have a strong guarantee that no future commit will
3089  * leave these blocks visible to the user.)
3090  *
3091  * Another thing we have to ensure is that if we are in ordered mode
3092  * and the inode is still attached to the committing transaction, we must
3093  * start writeout of all the dirty pages which are being truncated.
3094  * This way we are sure that all the data written in the previous
3095  * transaction are already on disk (truncate waits for pages under
3096  * writeback).
3097  *
3098  * Called with inode->i_mutex down.
3099  */
3100 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
3101 {
3102         struct inode *inode = dentry->d_inode;
3103         int error, rc = 0;
3104         const unsigned int ia_valid = attr->ia_valid;
3105
3106         error = inode_change_ok(inode, attr);
3107         if (error)
3108                 return error;
3109
3110         if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
3111                 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
3112                 handle_t *handle;
3113
3114                 /* (user+group)*(old+new) structure, inode write (sb,
3115                  * inode block, ? - but truncate inode update has it) */
3116                 handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
3117                                         EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
3118                 if (IS_ERR(handle)) {
3119                         error = PTR_ERR(handle);
3120                         goto err_out;
3121                 }
3122                 error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
3123                 if (error) {
3124                         ext4_journal_stop(handle);
3125                         return error;
3126                 }
3127                 /* Update corresponding info in inode so that everything is in
3128                  * one transaction */
3129                 if (attr->ia_valid & ATTR_UID)
3130                         inode->i_uid = attr->ia_uid;
3131                 if (attr->ia_valid & ATTR_GID)
3132                         inode->i_gid = attr->ia_gid;
3133                 error = ext4_mark_inode_dirty(handle, inode);
3134                 ext4_journal_stop(handle);
3135         }
3136
3137         if (attr->ia_valid & ATTR_SIZE) {
3138                 if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
3139                         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3140
3141                         if (attr->ia_size > sbi->s_bitmap_maxbytes) {
3142                                 error = -EFBIG;
3143                                 goto err_out;
3144                         }
3145                 }
3146         }
3147
3148         if (S_ISREG(inode->i_mode) &&
3149             attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
3150                 handle_t *handle;
3151
3152                 handle = ext4_journal_start(inode, 3);
3153                 if (IS_ERR(handle)) {
3154                         error = PTR_ERR(handle);
3155                         goto err_out;
3156                 }
3157
3158                 error = ext4_orphan_add(handle, inode);
3159                 EXT4_I(inode)->i_disksize = attr->ia_size;
3160                 rc = ext4_mark_inode_dirty(handle, inode);
3161                 if (!error)
3162                         error = rc;
3163                 ext4_journal_stop(handle);
3164
3165                 if (ext4_should_order_data(inode)) {
3166                         error = ext4_begin_ordered_truncate(inode,
3167                                                             attr->ia_size);
3168                         if (error) {
3169                                 /* Do as much error cleanup as possible */
3170                                 handle = ext4_journal_start(inode, 3);
3171                                 if (IS_ERR(handle)) {
3172                                         ext4_orphan_del(NULL, inode);
3173                                         goto err_out;
3174                                 }
3175                                 ext4_orphan_del(handle, inode);
3176                                 ext4_journal_stop(handle);
3177                                 goto err_out;
3178                         }
3179                 }
3180         }
3181
3182         rc = inode_setattr(inode, attr);
3183
3184         /* If inode_setattr's call to ext4_truncate failed to get a
3185          * transaction handle at all, we need to clean up the in-core
3186          * orphan list manually. */
3187         if (inode->i_nlink)
3188                 ext4_orphan_del(NULL, inode);
3189
3190         if (!rc && (ia_valid & ATTR_MODE))
3191                 rc = ext4_acl_chmod(inode);
3192
3193 err_out:
3194         ext4_std_error(inode->i_sb, error);
3195         if (!error)
3196                 error = rc;
3197         return error;
3198 }
3199
3200
3201 /*
3202  * How many blocks doth make a writepage()?
3203  *
3204  * With N blocks per page, it may be:
3205  * N data blocks
3206  * 2 indirect blocks
3207  * 2 dindirect blocks
3208  * 1 tindirect block
3209  * N+5 bitmap blocks (from the above)
3210  * N+5 group descriptor summary blocks
3211  * 1 inode block
3212  * 1 superblock.
3213  * 2 * EXT4_SINGLEDATA_TRANS_BLOCKS for the quota files
3214  *
3215  * 3 * (N + 5) + 2 + 2 * EXT4_SINGLEDATA_TRANS_BLOCKS
3216  *
3217  * With ordered or writeback data it's the same, less the N data blocks.
3218  *
3219  * If the inode's direct blocks can hold an integral number of pages then a
3220  * page cannot straddle two indirect blocks, and we can only touch one indirect
3221  * and dindirect block, and the "5" above becomes "3".
3222  *
3223  * This still overestimates under most circumstances.  If we were to pass the
3224  * start and end offsets in here as well we could do block_to_path() on each
3225  * block and work out the exact number of indirects which are touched.  Pah.
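 *
 * As a rough worked example (assuming 4KB blocks and 4KB pages, so one
 * block per page): bpp == 1 and EXT4_NDIR_BLOCKS % bpp == 0, so the code
 * below uses indirects == 3; data=journal then reserves
 * 3 * (1 + 3) + 2 = 14 credits and ordered/writeback 2 * (1 + 3) + 2 = 10,
 * plus the quota credits when CONFIG_QUOTA is enabled.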
3226  */
3227
3228 int ext4_writepage_trans_blocks(struct inode *inode)
3229 {
3230         int bpp = ext4_journal_blocks_per_page(inode);
3231         int indirects = (EXT4_NDIR_BLOCKS % bpp) ? 5 : 3;
3232         int ret;
3233
3234         if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
3235                 return ext4_ext_writepage_trans_blocks(inode, bpp);
3236
3237         if (ext4_should_journal_data(inode))
3238                 ret = 3 * (bpp + indirects) + 2;
3239         else
3240                 ret = 2 * (bpp + indirects) + 2;
3241
3242 #ifdef CONFIG_QUOTA
3243         /* We know that structure was already allocated during DQUOT_INIT so
3244          * we will be updating only the data blocks + inodes */
3245         ret += 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
3246 #endif
3247
3248         return ret;
3249 }
3250
3251 /*
3252  * The caller must have previously called ext4_reserve_inode_write().
3253  * Given this, we know that the caller already has write access to iloc->bh.
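 *
 * The typical calling pattern, sketched for illustration, is:
 *
 *      err = ext4_reserve_inode_write(handle, inode, &iloc);
 *      if (!err)
 *              err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *
 * which is essentially what ext4_mark_inode_dirty() does below.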
3254  */
3255 int ext4_mark_iloc_dirty(handle_t *handle,
3256                 struct inode *inode, struct ext4_iloc *iloc)
3257 {
3258         int err = 0;
3259
3260         if (test_opt(inode->i_sb, I_VERSION))
3261                 inode_inc_iversion(inode);
3262
3263         /* ext4_do_update_inode() consumes one bh->b_count */
3264         get_bh(iloc->bh);
3265
3266         /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
3267         err = ext4_do_update_inode(handle, inode, iloc);
3268         put_bh(iloc->bh);
3269         return err;
3270 }
3271
3272 /*
3273  * On success, we end up with an outstanding reference count against
3274  * iloc->bh.  This _must_ be cleaned up later.
3275  */
3276
3277 int
3278 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
3279                          struct ext4_iloc *iloc)
3280 {
3281         int err = 0;
3282         if (handle) {
3283                 err = ext4_get_inode_loc(inode, iloc);
3284                 if (!err) {
3285                         BUFFER_TRACE(iloc->bh, "get_write_access");
3286                         err = ext4_journal_get_write_access(handle, iloc->bh);
3287                         if (err) {
3288                                 brelse(iloc->bh);
3289                                 iloc->bh = NULL;
3290                         }
3291                 }
3292         }
3293         ext4_std_error(inode->i_sb, err);
3294         return err;
3295 }
3296
3297 /*
3298  * Expand an inode by new_extra_isize bytes.
3299  * Returns 0 on success or negative error number on failure.
3300  */
3301 static int ext4_expand_extra_isize(struct inode *inode,
3302                                    unsigned int new_extra_isize,
3303                                    struct ext4_iloc iloc,
3304                                    handle_t *handle)
3305 {
3306         struct ext4_inode *raw_inode;
3307         struct ext4_xattr_ibody_header *header;
3308         struct ext4_xattr_entry *entry;
3309
3310         if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
3311                 return 0;
3312
3313         raw_inode = ext4_raw_inode(&iloc);
3314
3315         header = IHDR(inode, raw_inode);
3316         entry = IFIRST(header);
3317
3318         /* No extended attributes present */
3319         if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR) ||
3320                 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
3321                 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
3322                         new_extra_isize);
3323                 EXT4_I(inode)->i_extra_isize = new_extra_isize;
3324                 return 0;
3325         }
3326
3327         /* try to expand with EAs present */
3328         return ext4_expand_extra_isize_ea(inode, new_extra_isize,
3329                                           raw_inode, handle);
3330 }
3331
3332 /*
3333  * What we do here is to mark the in-core inode as clean with respect to inode
3334  * dirtiness (it may still be data-dirty).
3335  * This means that the in-core inode may be reaped by prune_icache
3336  * without having to perform any I/O.  This is a very good thing,
3337  * because *any* task may call prune_icache - even ones which
3338  * have a transaction open against a different journal.
3339  *
3340  * Is this cheating?  Not really.  Sure, we haven't written the
3341  * inode out, but prune_icache isn't a user-visible syncing function.
3342  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
3343  * we start and wait on commits.
3344  *
3345  * Is this efficient/effective?  Well, we're being nice to the system
3346  * by cleaning up our inodes proactively so they can be reaped
3347  * without I/O.  But we are potentially leaving up to five seconds'
3348  * worth of inodes floating about which prune_icache wants us to
3349  * write out.  One way to fix that would be to get prune_icache()
3350  * to do a write_super() to free up some memory.  It has the desired
3351  * effect.
3352  */
3353 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
3354 {
3355         struct ext4_iloc iloc;
3356         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3357         static unsigned int mnt_count;
3358         int err, ret;
3359
3360         might_sleep();
3361         err = ext4_reserve_inode_write(handle, inode, &iloc);
3362         if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
3363             !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
3364                 /*
3365                  * We need extra buffer credits since we may write into EA block
3366                  * with this same handle. If journal_extend fails, then it will
3367                  * only result in a minor loss of functionality for that inode.
3368                  * If this is felt to be critical, then e2fsck should be run to
3369                  * force a large enough s_min_extra_isize.
3370                  */
3371                 if ((jbd2_journal_extend(handle,
3372                              EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
3373                         ret = ext4_expand_extra_isize(inode,
3374                                                       sbi->s_want_extra_isize,
3375                                                       iloc, handle);
3376                         if (ret) {
3377                                 EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
3378                                 if (mnt_count !=
3379                                         le16_to_cpu(sbi->s_es->s_mnt_count)) {
3380                                         ext4_warning(inode->i_sb, __func__,
3381                                         "Unable to expand inode %lu. Delete"
3382                                         " some EAs or run e2fsck.",
3383                                         inode->i_ino);
3384                                         mnt_count =
3385                                           le16_to_cpu(sbi->s_es->s_mnt_count);
3386                                 }
3387                         }
3388                 }
3389         }
3390         if (!err)
3391                 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
3392         return err;
3393 }
3394
3395 /*
3396  * ext4_dirty_inode() is called from __mark_inode_dirty()
3397  *
3398  * We're really interested in the case where a file is being extended.
3399  * i_size has been changed by generic_commit_write() and we thus need
3400  * to include the updated inode in the current transaction.
3401  *
3402  * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
3403  * are allocated to the file.
3404  *
3405  * If the inode is marked synchronous, we don't honour that here - doing
3406  * so would cause a commit on atime updates, which we don't bother doing.
3407  * We handle synchronous inodes at the highest possible level.
3408  */
3409 void ext4_dirty_inode(struct inode *inode)
3410 {
3411         handle_t *current_handle = ext4_journal_current_handle();
3412         handle_t *handle;
3413
3414         handle = ext4_journal_start(inode, 2);
3415         if (IS_ERR(handle))
3416                 goto out;
3417         if (current_handle &&
3418                 current_handle->h_transaction != handle->h_transaction) {
3419                 /* This task has a transaction open against a different fs */
3420                 printk(KERN_EMERG "%s: transactions do not match!\n",
3421                        __func__);
3422         } else {
3423                 jbd_debug(5, "marking dirty.  outer handle=%p\n",
3424                                 current_handle);
3425                 ext4_mark_inode_dirty(handle, inode);
3426         }
3427         ext4_journal_stop(handle);
3428 out:
3429         return;
3430 }
3431
3432 #if 0
3433 /*
3434  * Bind an inode's backing buffer_head into this transaction, to prevent
3435  * it from being flushed to disk early.  Unlike
3436  * ext4_reserve_inode_write, this leaves behind no bh reference and
3437  * returns no iloc structure, so the caller needs to repeat the iloc
3438  * lookup to mark the inode dirty later.
3439  */
3440 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
3441 {
3442         struct ext4_iloc iloc;
3443
3444         int err = 0;
3445         if (handle) {
3446                 err = ext4_get_inode_loc(inode, &iloc);
3447                 if (!err) {
3448                         BUFFER_TRACE(iloc.bh, "get_write_access");
3449                         err = jbd2_journal_get_write_access(handle, iloc.bh);
3450                         if (!err)
3451                                 err = ext4_journal_dirty_metadata(handle,
3452                                                                   iloc.bh);
3453                         brelse(iloc.bh);
3454                 }
3455         }
3456         ext4_std_error(inode->i_sb, err);
3457         return err;
3458 }
3459 #endif
3460
3461 int ext4_change_inode_journal_flag(struct inode *inode, int val)
3462 {
3463         journal_t *journal;
3464         handle_t *handle;
3465         int err;
3466
3467         /*
3468          * We have to be very careful here: changing a data block's
3469          * journaling status dynamically is dangerous.  If we write a
3470          * data block to the journal, change the status and then delete
3471          * that block, we risk forgetting to revoke the old log record
3472          * from the journal and so a subsequent replay can corrupt data.
3473          * So, first we make sure that the journal is empty and that
3474          * nobody is changing anything.
3475          */
3476
3477         journal = EXT4_JOURNAL(inode);
3478         if (is_journal_aborted(journal))
3479                 return -EROFS;
3480
3481         jbd2_journal_lock_updates(journal);
3482         jbd2_journal_flush(journal);
3483
3484         /*
3485          * OK, there are no updates running now, and all cached data is
3486          * synced to disk.  We are now in a completely consistent state
3487          * which doesn't have anything in the journal, and we know that
3488          * no filesystem updates are running, so it is safe to modify
3489          * the inode's in-core data-journaling state flag now.
3490          */
3491
3492         if (val)
3493                 EXT4_I(inode)->i_flags |= EXT4_JOURNAL_DATA_FL;
3494         else
3495                 EXT4_I(inode)->i_flags &= ~EXT4_JOURNAL_DATA_FL;
3496         ext4_set_aops(inode);
3497
3498         jbd2_journal_unlock_updates(journal);
3499
3500         /* Finally we can mark the inode as dirty. */
3501
3502         handle = ext4_journal_start(inode, 1);
3503         if (IS_ERR(handle))
3504                 return PTR_ERR(handle);
3505
3506         err = ext4_mark_inode_dirty(handle, inode);
3507         handle->h_sync = 1;
3508         ext4_journal_stop(handle);
3509         ext4_std_error(inode->i_sb, err);
3510
3511         return err;
3512 }
3513
3514 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
3515 {
3516         return !buffer_mapped(bh);
3517 }
3518
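/*
 * mmap write-fault handler: before the faulting page may be made
 * writable, make sure it has disk blocks allocated, reusing the
 * address_space write_begin/write_end hooks when any of its buffers
 * are still unmapped.
 */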
3519 int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
3520 {
3521         loff_t size;
3522         unsigned long len;
3523         int ret = -EINVAL;
3524         struct file *file = vma->vm_file;
3525         struct inode *inode = file->f_path.dentry->d_inode;
3526         struct address_space *mapping = inode->i_mapping;
3527
3528         /*
3529          * Get i_alloc_sem to stop truncates messing with the inode. We cannot
3530          * get i_mutex because we are already holding mmap_sem.
3531          */
3532         down_read(&inode->i_alloc_sem);
3533         size = i_size_read(inode);
3534         if (page->mapping != mapping || size <= page_offset(page)
3535             || !PageUptodate(page)) {
3536                 /* page got truncated from under us? */
3537                 goto out_unlock;
3538         }
3539         ret = 0;
3540         if (PageMappedToDisk(page))
3541                 goto out_unlock;
3542
3543         if (page->index == size >> PAGE_CACHE_SHIFT)
3544                 len = size & ~PAGE_CACHE_MASK;
3545         else
3546                 len = PAGE_CACHE_SIZE;
3547
3548         if (page_has_buffers(page)) {
3549                 /* return if we have all the buffers mapped */
3550                 if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
3551                                        ext4_bh_unmapped))
3552                         goto out_unlock;
3553         }
3554         /*
3555          * OK, we need to fill the hole... Do write_begin/write_end
3556          * to do the block allocation/reservation.  We are not holding
3557          * inode->i_mutex here, which allows parallel write_begin and
3558          * write_end calls; lock_page prevents that from happening on
3559          * the same page, though.
3560          */
3561         ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
3562                         len, AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
3563         if (ret < 0)
3564                 goto out_unlock;
3565         ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
3566                         len, len, page, NULL);
3567         if (ret < 0)
3568                 goto out_unlock;
3569         ret = 0;
3570 out_unlock:
3571         up_read(&inode->i_alloc_sem);
3572         return ret;
3573 }