/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext4_jbd2.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <linux/ext4_fs_extents.h>
#include <asm/uaccess.h>
/*
 * ext_pblock:
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
static ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ex->ee_start_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
	return block;
}
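/*
 * Note on the split shift above: ((x << 31) << 1) equals shifting by 32
 * but is a defensive idiom that stays well-defined even if the block
 * type is ever only 32 bits wide.  Worked example (illustrative only):
 * pb = 0x123456789 is stored as ee_start_lo = 0x23456789 and
 * ee_start_hi = 0x0001, and recombining yields 0x123456789 again.
 */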
/*
 * idx_pblock:
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ix->ei_leaf_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
	return block;
}
/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
	ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
	ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
static handle_t *ext4_ext_journal_restart(handle_t *handle, int needed)
{
	int err;

	if (handle->h_buffer_credits > needed)
		return handle;
	if (!ext4_journal_extend(handle, needed))
		return handle;
	err = ext4_journal_restart(handle, needed);

	return handle;
}
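/*
 * The flow above: reuse the running handle while it still has enough
 * journal credits, try to extend it in place, and only fall back to a
 * full transaction restart when the journal refuses to extend it.
 */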
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
			  struct ext4_ext_path *path)
{
	int err;

	if (path->p_bh) {
		/* path points to block */
		err = ext4_journal_dirty_metadata(handle, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_grpblk_t colour;
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		ex = path[depth].p_ext;
		if (ex)
			return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
		le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
	colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour + block;
}
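/*
 * Illustrative numbers for the colouring above (not from the original
 * source): with 4 KiB blocks a group holds 32768 blocks, so each of the
 * 16 PID-based "colour" slots is 2048 blocks wide; concurrent writers
 * with different PIDs get goals spread across the group instead of
 * contending for the same blocks.
 */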
static ext4_fsblk_t
ext4_ext_new_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_block(handle, inode, goal, err);
	return newblock;
}
static int ext4_ext_space_block(struct inode *inode)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (size > 6)
		size = 6;
#endif
	return size;
}

static int ext4_ext_space_block_idx(struct inode *inode)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (size > 5)
		size = 5;
#endif
	return size;
}

static int ext4_ext_space_root(struct inode *inode)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (size > 3)
		size = 3;
#endif
	return size;
}

static int ext4_ext_space_root_idx(struct inode *inode)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (size > 4)
		size = 4;
#endif
	return size;
}
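/*
 * Capacity example (arithmetic only): the extent header, an extent and
 * an index entry are each 12 bytes, so a 4096-byte block holds
 * (4096 - 12) / 12 = 340 extents or indexes, while the 60-byte i_data
 * area in the inode holds (60 - 12) / 12 = 4 entries at the root.
 */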
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode);
		else
			max = ext4_ext_space_root_idx(inode);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode);
		else
			max = ext4_ext_space_block_idx(inode);
	}

	return max;
}
static int __ext4_ext_check_header(const char *function, struct inode *inode,
					struct ext4_extent_header *eh,
					int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error(inode->i_sb, function,
			"bad header in inode #%lu: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check_header(inode, eh, depth)	\
	__ext4_ext_check_header(__FUNCTION__, inode, eh, depth)
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_get_actual_len(ex), ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode,path)
#define ext4_ext_show_leaf(inode,path)
#endif
static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}
/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk("k=%d, ix=0x%p, first=0x%p\n", k,
					ix, EXT_FIRST_INDEX(eh));
				printk("%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif
}
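/*
 * The two binary searches here (index and extent) share one invariant:
 * the loop keeps l as the first entry that may start beyond @block, so
 * l - 1 is the rightmost entry whose start is <= @block.  Example
 * (illustrative): with entries starting at 0, 100 and 200, a search for
 * block 150 leaves the result pointing at the entry starting at 100.
 */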
/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext_pblock(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif
}
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);
	if (ext4_ext_check_header(inode, eh, depth))
		return ERR_PTR(-EIO);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;

	i = depth;
	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_bread(inode->i_sb, path[ppos].p_block);
		if (!bh)
			goto err;

		eh = ext_block_hdr(bh);
		ppos++;
		BUG_ON(ppos > depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (ext4_ext_check_header(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_hdr = eh;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}
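/*
 * Path layout reminder (descriptive, not from the original source):
 * after a successful lookup path[0] describes the root in the inode
 * body and path[depth] the leaf, each element carrying that level's
 * header, backing buffer (NULL for the root) and the index or extent
 * the binary search selected.
 */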
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *curp,
				int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);

	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max));
	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}
/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, we stop processing and mark the filesystem
	 * read-only. The index won't be inserted and the tree will remain
	 * consistent. The next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	BUG_ON(newblock == 0);
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext_pblock(path[depth].p_ext),
				ext4_ext_get_actual_len(path[depth].p_ext),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_journal_dirty_metadata(handle, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		path[depth].p_hdr->eh_entries =
		     cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;

	}

	/* create intermediate indexes */
	k = depth - at - 1;
	BUG_ON(k < 0);

	ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
				EXT_LAST_INDEX(path[i].p_hdr));
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			neh->eh_entries =
				cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_journal_dirty_metadata(handle, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, ablocks[i], 1, 1);
		}
	}
	kfree(ablocks);

	return err;
}
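/*
 * Split sketch (illustrative): with depth 2 and a free index slot at
 * level @at = 1, depth - at = 1 new block is allocated (just the leaf);
 * the extents to the right of the split point move into it and a
 * single index entry for @border is inserted at level 1.  Smaller @at
 * values allocate intermediate index blocks as well.
 */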
/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_journal_dirty_metadata(handle, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}
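/*
 * Depth-growth example (illustrative): a depth-0 tree whose root leaf
 * in i_data is full gets its four extents copied into a freshly
 * allocated block; the root then becomes an index with a single entry
 * pointing at that block and eh_depth goes from 0 to 1.
 */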
/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests growing the tree in depth.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}
/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
			ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	if (*logical < le32_to_cpu(ex->ee_block)) {
		BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
		}
		return 0;
	}

	BUG_ON(*logical < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len));

	*logical = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1;
	*phys = ext_pblock(ex) + le16_to_cpu(ex->ee_len) - 1;
	return 0;
}
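/*
 * Example (illustrative): if path[depth].p_ext covers logical blocks
 * 100..107 at physical 5000..5007 and *logical is 200, the function
 * returns with *logical = 107 and *phys = 5007, i.e. the last block of
 * the closest allocated extent to the left.
 */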
/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
			ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	if (*logical < le32_to_cpu(ex->ee_block)) {
		BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
		}
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);
		return 0;
	}

	BUG_ON(*logical < le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len));

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);
		return 0;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and
	 * found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		if (ext4_ext_check_header(inode, eh, depth)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check_header(inode, eh, path->p_depth - depth)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext_pblock(ex);
	put_bh(bh);
	return 0;
}
/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}
/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
					struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-depth tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCK;
}
/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	BUG_ON(ex == NULL);
	BUG_ON(eh == NULL);

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}
static int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (le16_to_cpu(ex1->ee_len) >= 4)
		return 0;
#endif
	if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
		return 1;
	return 0;
}
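/*
 * Merge example (illustrative): ex1 = {block 100, len 8, phys 5000} and
 * ex2 = {block 108, len 4, phys 5008} are adjacent both logically and
 * physically and share the same (un)initialized state, so they can be
 * merged into a single {block 100, len 12, phys 5000} extent.
 */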
/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
int ext4_ext_try_to_merge(struct inode *inode,
			  struct ext4_ext_path *path,
			  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries) - 1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			ext4_error(inode->i_sb, "ext4_ext_try_to_merge",
			   "inode#%lu, eh->eh_entries = 0!", inode->i_ino);
	}

	return merge_done;
}
/*
 * ext4_ext_check_overlap:
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
unsigned int ext4_ext_check_overlap(struct inode *inode,
				    struct ext4_extent *newext,
				    struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCK)
			goto out;
	}

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCK - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
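/*
 * Overlap example (illustrative): if newext asks for logical blocks
 * 100..119 (len 20) but the next allocated block is 110, ee_len is
 * trimmed to 10 so the insert covers only 100..109, and the function
 * returns 1.
 */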
/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	unsigned uninitialized = 0;

	BUG_ON(ext4_ext_get_actual_len(newext) == 0);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	BUG_ON(path[depth].p_hdr == NULL);

	/* try to insert block into found extent and return */
	if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append %d block to %d:%d (from %llu)\n",
				ext4_ext_get_actual_len(newext),
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex), ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:%d\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_get_actual_len(newext));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
		/* BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:%d after: nearest 0x%p, "
					"move %d from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext_pblock(newext),
					ext4_ext_get_actual_len(newext),
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%llu:%d before: nearest 0x%p, "
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_get_actual_len(newext),
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

	eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
	ext4_ext_try_to_merge(inode, path, nearex);

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_tree_changed(inode);
	ext4_ext_invalidate_cache(inode);
	return err;
}
static void
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
			__u32 len, ext4_fsblk_t start, int type)
{
	struct ext4_ext_cache *cex;

	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_type = type;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
}
/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				ext4_lblk_t block)
{
	int depth = ext_depth(inode);
	unsigned long len;
	ext4_lblk_t lblock;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
				block,
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);

		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex),
				block);
		BUG_ON(next == lblock);
		len = next - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %u:%lu\n", lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
}
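/*
 * Gap example (illustrative): with extents covering 0..9 and 50..59, a
 * lookup for block 20 caches the gap [10, len 40], so later lookups in
 * 10..49 are answered as holes without walking the tree again.
 */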
static int
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
			struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;

	cex = &EXT4_I(inode)->i_cached_extent;

	/* has cache valid data? */
	if (cex->ec_type == EXT4_EXT_CACHE_NO)
		return EXT4_EXT_CACHE_NO;

	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%u cached by %u:%u:%llu\n",
				block,
				cex->ec_block, cex->ec_len, cex->ec_start);
		return cex->ec_type;
	}

	/* not in cache */
	return EXT4_EXT_CACHE_NO;
}
/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * last index in the block only.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path)
{
	struct buffer_head *bh;
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	path--;
	leaf = idx_pblock(path->p_idx);
	BUG_ON(path->p_hdr->eh_entries == 0);
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;
	path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	bh = sb_find_get_block(inode->i_sb, leaf);
	ext4_forget(handle, 1, inode, bh, leaf);
	ext4_free_blocks(handle, inode, leaf, 1, 1);
	return err;
}
/*
 * ext4_ext_calc_credits_for_insert:
 * This routine returns max. credits that the extent tree can consume.
 * It should be OK for low-performance paths like ->writepage().
 *
 * To allow many writing processes to fit into a single transaction,
 * the caller should calculate credits under i_data_sem and
 * pass the actual path.
 */
int ext4_ext_calc_credits_for_insert(struct inode *inode,
						struct ext4_ext_path *path)
{
	int depth, needed;

	if (path) {
		/* probably there is space in leaf? */
		depth = ext_depth(inode);
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max))
			return 1;
	}

	/*
	 * given 32-bit logical block (4294967296 blocks), max. tree
	 * can be 4 levels in depth -- 4 * 340^4 == 53453440000;
	 * let's also add one more level for imbalance.
	 */
	depth = 5;

	/* allocation of new data block(s) */
	needed = 2;

	/*
	 * tree can be full, so it would need to grow in depth:
	 * we need one credit to modify old root, credits for
	 * new root will be added in split accounting
	 */
	needed += 1;

	/*
	 * Index split can happen, we would need:
	 *    allocate intermediate indexes (bitmap + group)
	 *      + change two blocks at each level, but root (already included)
	 */
	needed += (depth * 2) + (depth * 2);

	/* any allocation modifies superblock */
	needed += 1;

	return needed;
}
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
				struct ext4_extent *ex,
				ext4_lblk_t from, ext4_lblk_t to)
{
	struct buffer_head *bh;
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	int i, metadata = 0;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		metadata = 1;
#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* tail removal */
		ext4_lblk_t num;
		ext4_fsblk_t start;

		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		start = ext_pblock(ex) + ee_len - num;
		ext_debug("free last %u blocks starting %llu\n", num, start);
		for (i = 0; i < num; i++) {
			bh = sb_find_get_block(inode->i_sb, start + i);
			ext4_forget(handle, 0, inode, bh, start + i);
		}
		ext4_free_blocks(handle, inode, start, num, metadata);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
		printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), ee_len);
	} else {
		printk(KERN_INFO "strange request: removal(2) "
				"%u-%u from %u:%u\n",
				from, to, le32_to_cpu(ex->ee_block), ee_len);
	}
	return 0;
}
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t start)
{
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	ext4_lblk_t a, b, block;
	unsigned num;
	ext4_lblk_t ex_ee_block;
	unsigned short ex_ee_len;
	unsigned uninitialized = 0;
	struct ext4_extent *ex;

	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug("truncate since %u in leaf\n", start);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	BUG_ON(eh == NULL);

	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	if (ext4_ext_is_uninitialized(ex))
		uninitialized = 1;
	ex_ee_len = ext4_ext_get_actual_len(ex);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {
		ext_debug("remove ext %u:%u\n", ex_ee_block, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;

		ext_debug("  border %u:%u\n", a, b);

		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
			block = 0;
			num = 0;
			BUG();
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
			num = a - block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			block = a;
			num = b - a;
			/* there is no "make a hole" API yet */
			BUG();
		} else {
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			num = 0;
			BUG_ON(a != ex_ee_block);
			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
		}

		/* at present, extent can't cross block group: */
		/* leaf + bitmap + group desc + sb + inode */
		credits = 5;
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
#ifdef CONFIG_QUOTA
		credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

		handle = ext4_ext_journal_restart(handle, credits);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto out;
		}

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, a, b);
		if (err)
			goto out;

		if (num == 0) {
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);
			eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
		}

		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark uninitialized if all the blocks in the
		 * extent have been removed.
		 */
		if (uninitialized && num)
			ext4_ext_mark_uninitialized(ex);

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:%llu\n", block, num,
				ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}
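/*
 * Truncation example (illustrative): truncating at start = 104 while an
 * extent covers 100..107 takes the "remove tail" branch above: a = 104,
 * b = 107, the last four blocks are freed, and the extent is rewritten
 * as 100..103 (num = 4).
 */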
/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}
static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path;
	handle_t *handle;
	int i = 0, err = 0;

	ext_debug("truncate since %u\n", start);

	/* probably first extent we're gonna free will be last in block */
	handle = ext4_journal_start(inode, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ext4_ext_invalidate_cache(inode);

	/*
	 * We start scanning from right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
	if (path == NULL) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}
	path[0].p_hdr = ext_inode_hdr(inode);
	if (ext4_ext_check_header(inode, path[0].p_hdr, depth)) {
		err = -EIO;
		goto out;
	}
	path[0].p_depth = depth;

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path, start);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		}

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, see at next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			struct buffer_head *bh;
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = sb_bread(sb, idx_pblock(path[i].p_idx));
			if (!bh) {
				/* should we reset i_size? */
				err = -EIO;
				break;
			}
			if (WARN_ON(i + 1 > depth)) {
				err = -EIO;
				break;
			}
			if (ext4_ext_check_header(inode, ext_block_hdr(bh),
							depth - i - 1)) {
				err = -EIO;
				break;
			}
			path[i + 1].p_bh = bh;

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must be already prepared by
				 * ext4_ext_rm_leaf() */
				err = ext4_ext_rm_idx(handle, inode, path + i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_tree_changed(inode);
	ext4_ext_drop_refs(path);
	kfree(path);
	ext4_journal_stop(handle);

	return err;
}
/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (test_opt(sb, EXTENTS)) {
		printk("EXT4-fs: file extents enabled");
#ifdef AGGRESSIVE_TEST
		printk(", aggressive tests");
#endif
#ifdef CHECK_BINSEARCH
		printk(", check binsearch");
#endif
#ifdef EXTENTS_STATS
		printk(", stats");
#endif
		printk("\n");
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}

/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
	if (!test_opt(sb, EXTENTS))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}
/*
 * This function is called by ext4_ext_get_blocks() if someone tries to write
 * to an uninitialized extent. It may result in splitting the uninitialized
 * extent into multiple extents (up to three: one initialized and two
 * uninitialized).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in the middle of the extent
 */
static int ext4_ext_convert_to_initialized(handle_t *handle,
						struct inode *inode,
						struct ext4_ext_path *path,
						ext4_lblk_t iblock,
						unsigned long max_blocks)
{
	struct ext4_extent *ex, newex;
	struct ext4_extent *ex1 = NULL;
	struct ext4_extent *ex2 = NULL;
	struct ext4_extent *ex3 = NULL;
	struct ext4_extent_header *eh;
	ext4_lblk_t ee_block;
	unsigned int allocated, ee_len, depth;
	ext4_fsblk_t newblock;
	int err = 0;
	int ret = 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	allocated = ee_len - (iblock - ee_block);
	newblock = iblock - ee_block + ext_pblock(ex);
	ex2 = ex;

	/* ex1: ee_block to iblock - 1 : uninitialized */
	if (iblock > ee_block) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(iblock - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/*
	 * for sanity, update the length of the ex2 extent before
	 * we insert ex3, if ex1 is NULL. This is to avoid temporary
	 * overlap of blocks.
	 */
	if (!ex1 && allocated > max_blocks)
		ex2->ee_len = cpu_to_le16(max_blocks);
	/* ex3: to ee_block + ee_len : uninitialised */
	if (allocated > max_blocks) {
		unsigned int newdepth;
		ex3 = &newex;
		ex3->ee_block = cpu_to_le32(iblock + max_blocks);
		ext4_ext_store_pblock(ex3, newblock + max_blocks);
		ex3->ee_len = cpu_to_le16(allocated - max_blocks);
		ext4_ext_mark_uninitialized(ex3);
		err = ext4_ext_insert_extent(handle, inode, path, ex3);
		if (err)
			goto out;
		/*
		 * The depth, and hence eh & ex might change
		 * as part of the insert above.
		 */
		newdepth = ext_depth(inode);
		if (newdepth != depth) {
			depth = newdepth;
			path = ext4_ext_find_extent(inode, iblock, NULL);
			if (IS_ERR(path)) {
				err = PTR_ERR(path);
				goto out;
			}
			eh = path[depth].p_hdr;
			ex = path[depth].p_ext;
			if (ex2 != &newex)
				ex2 = ex;
		}
		allocated = max_blocks;
	}
	/*
	 * If there was a change of depth as part of the
	 * insertion of ex3 above, we need to update the length
	 * of the ex1 extent again here
	 */
	if (ex1 && ex1 != ex) {
		ex1 = ex;
		ex1->ee_len = cpu_to_le16(iblock - ee_block);
		ext4_ext_mark_uninitialized(ex1);
		ex2 = &newex;
	}
	/* ex2: iblock to iblock + maxblocks-1 : initialised */
	ex2->ee_block = cpu_to_le32(iblock);
	ext4_ext_store_pblock(ex2, newblock);
	ex2->ee_len = cpu_to_le16(allocated);
	if (ex2 != ex)
		goto insert;
	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/*
	 * New (initialized) extent starts from the first block
	 * in the current extent. i.e., ex2 == ex
	 * We have to see if it can be merged with the extent
	 * on the left.
	 */
	if (ex2 > EXT_FIRST_EXTENT(eh)) {
		/*
		 * To merge left, pass "ex2 - 1" to try_to_merge(),
		 * since it merges towards right _only_.
		 */
		ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
			depth = ext_depth(inode);
			ex2--;
		}
	}
	/*
	 * Try to Merge towards right. This might be required
	 * only when the whole extent is being written to.
	 * i.e. ex2 == ex and ex3 == NULL.
	 */
	if (!ex3) {
		ret = ext4_ext_try_to_merge(inode, path, ex2);
		if (ret) {
			err = ext4_ext_correct_indexes(handle, inode, path);
			if (err)
				goto out;
		}
	}
	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + depth);
	goto out;
insert:
	err = ext4_ext_insert_extent(handle, inode, path, &newex);
out:
	return err ? err : allocated;
}
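/*
 * Split example (illustrative): writing blocks 120..129 into an
 * uninitialized extent covering 100..199 yields case c> above: ex1 =
 * 100..119 (uninitialized), ex2 = 120..129 (initialized) and ex3 =
 * 130..199 (uninitialized).
 */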
2277 * Need to be called with
2278 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
2279 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
2281 int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
2283 unsigned long max_blocks, struct buffer_head *bh_result,
2284 int create, int extend_disksize)
2286 struct ext4_ext_path *path = NULL;
2287 struct ext4_extent_header *eh;
2288 struct ext4_extent newex, *ex;
2289 ext4_fsblk_t goal, newblock;
2290 int err = 0, depth, ret;
2291 unsigned long allocated = 0;
2292 struct ext4_allocation_request ar;
2294 __clear_bit(BH_New, &bh_result->b_state);
2295 ext_debug("blocks %u/%lu requested for inode %u\n",
2296 iblock, max_blocks, inode->i_ino);
2298 /* check in cache */
2299 goal = ext4_ext_in_cache(inode, iblock, &newex);
2301 if (goal == EXT4_EXT_CACHE_GAP) {
2304 * block isn't allocated yet and
2305 * user doesn't want to allocate it
2309 /* we should allocate requested block */
2310 } else if (goal == EXT4_EXT_CACHE_EXTENT) {
2311 /* block is already allocated */
2313 - le32_to_cpu(newex.ee_block)
2314 + ext_pblock(&newex);
2315 /* number of remaining blocks in the extent */
2316 allocated = le16_to_cpu(newex.ee_len) -
2317 (iblock - le32_to_cpu(newex.ee_block));
2324 /* find extent for this block */
2325 path = ext4_ext_find_extent(inode, iblock, NULL);
2327 err = PTR_ERR(path);
2332 depth = ext_depth(inode);
2335 * consistent leaf must not be empty;
2336 * this situation is possible, though, _during_ tree modification;
2337 * this is why assert can't be put in ext4_ext_find_extent()
2339 BUG_ON(path[depth].p_ext == NULL && depth != 0);
2340 eh = path[depth].p_hdr;
2342 ex = path[depth].p_ext;
2344 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
2345 ext4_fsblk_t ee_start = ext_pblock(ex);
2346 unsigned short ee_len;
2349 * Uninitialized extents are treated as holes, except that
2350 * we split out initialized portions during a write.
2352 ee_len = ext4_ext_get_actual_len(ex);
2353 /* if found extent covers block, simply return it */
2354 if (iblock >= ee_block && iblock < ee_block + ee_len) {
2355 newblock = iblock - ee_block + ee_start;
2356 /* number of remaining blocks in the extent */
2357 allocated = ee_len - (iblock - ee_block);
2358 ext_debug("%u fit into %lu:%d -> %llu\n", iblock,
2359 ee_block, ee_len, newblock);
2361 /* Do not put uninitialized extent in the cache */
2362 if (!ext4_ext_is_uninitialized(ex)) {
2363 ext4_ext_put_in_cache(inode, ee_block,
2365 EXT4_EXT_CACHE_EXTENT);
2368 if (create == EXT4_CREATE_UNINITIALIZED_EXT)
2373 ret = ext4_ext_convert_to_initialized(handle, inode,
2385 * requested block isn't allocated yet;
2386 * we couldn't try to create block if create flag is zero
2390 * put just found gap into cache to speed up
2391 * subsequent requests
2393 ext4_ext_put_gap_in_cache(inode, path, iblock);
2397 * Okay, we need to do block allocation. Lazily initialize the block
2398 * allocation info here if necessary.
2400 if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
2401 ext4_init_block_alloc_info(inode);
2403 /* find neighbour allocated blocks */
2405 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
2409 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
2414 * See if request is beyond maximum number of blocks we can have in
2415 * a single extent. For an initialized extent this limit is
2416 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
2417 * EXT_UNINIT_MAX_LEN.
2419 if (max_blocks > EXT_INIT_MAX_LEN &&
2420 create != EXT4_CREATE_UNINITIALIZED_EXT)
2421 max_blocks = EXT_INIT_MAX_LEN;
2422 else if (max_blocks > EXT_UNINIT_MAX_LEN &&
2423 create == EXT4_CREATE_UNINITIALIZED_EXT)
2424 max_blocks = EXT_UNINIT_MAX_LEN;
	/* Check if we can really insert an (iblock)::(iblock+max_blocks) extent */
	newex.ee_block = cpu_to_le32(iblock);
	newex.ee_len = cpu_to_le16(max_blocks);
	err = ext4_ext_check_overlap(inode, &newex, path);
	if (err)
		allocated = le16_to_cpu(newex.ee_len);
	else
		allocated = max_blocks;
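	/*
	 * Sketch of the overlap case (assumed behaviour of
	 * ext4_ext_check_overlap()): if an existing extent already begins
	 * inside (iblock)::(iblock+max_blocks), newex.ee_len is trimmed so
	 * the new extent ends right before it, and only the trimmed length
	 * is allocated below.
	 */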
	/* allocate new block */
	ar.inode = inode;
	ar.goal = ext4_ext_find_goal(inode, path, iblock);
	ar.logical = iblock;
	ar.len = allocated;
	if (S_ISREG(inode->i_mode))
		ar.flags = EXT4_MB_HINT_DATA;
	else
		/* disable in-core preallocation for non-regular files */
		ar.flags = 0;
	newblock = ext4_mb_new_blocks(handle, &ar, &err);
	if (!newblock)
		goto out2;
	ext_debug("allocate new block: goal %llu, found %llu/%lu\n",
			goal, newblock, allocated);

	/* try to insert the new extent into the found leaf and return */
	ext4_ext_store_pblock(&newex, newblock);
	newex.ee_len = cpu_to_le16(ar.len);
	if (create == EXT4_CREATE_UNINITIALIZED_EXT)  /* Mark uninitialized */
		ext4_ext_mark_uninitialized(&newex);
	err = ext4_ext_insert_extent(handle, inode, path, &newex);
	if (err) {
		/* free the data blocks we just allocated */
		/* calling discard here directly is not a good idea,
		 * but otherwise we'd need to call it on every free() */
		ext4_mb_discard_inode_preallocations(inode);
		ext4_free_blocks(handle, inode, ext_pblock(&newex),
				 le16_to_cpu(newex.ee_len), 0);
		goto out2;
	}
	if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
		EXT4_I(inode)->i_disksize = inode->i_size;

	/* the previous routine could use the block we allocated */
	newblock = ext_pblock(&newex);
	allocated = le16_to_cpu(newex.ee_len);
outnew:
	__set_bit(BH_New, &bh_result->b_state);

	/* Cache only when it is _not_ an uninitialized extent */
	if (create != EXT4_CREATE_UNINITIALIZED_EXT)
		ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
					EXT4_EXT_CACHE_EXTENT);
out:
	if (allocated > max_blocks)
		allocated = max_blocks;
	ext4_ext_show_leaf(inode, path);
	__set_bit(BH_Mapped, &bh_result->b_state);
	bh_result->b_bdev = inode->i_sb->s_bdev;
	bh_result->b_blocknr = newblock;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	return err ? err : allocated;
}
void ext4_ext_truncate(struct inode *inode, struct page *page)
{
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t last_block;
	handle_t *handle;
	int err = 0;

	/* the first extent we free will probably be the last in the block */
	err = ext4_writepage_trans_blocks(inode) + 3;
	handle = ext4_journal_start(inode, err);
	if (IS_ERR(handle)) {
		if (page) {
			clear_highpage(page);
			flush_dcache_page(page);
			unlock_page(page);
			page_cache_release(page);
		}
		return;
	}

	if (page)
		ext4_block_truncate_page(handle, page, mapping, inode->i_size);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_ext_invalidate_cache(inode);

	ext4_mb_discard_inode_preallocations(inode);

	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */
	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	/* we have to know where to truncate from in the crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	ext4_mark_inode_dirty(handle, inode);

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
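	/*
	 * Round-up example (illustrative numbers): with 4 KiB blocks,
	 * i_size = 10000 gives last_block = (10000 + 4095) >> 12 = 3,
	 * so blocks 3 and beyond are removed from the extent tree while
	 * blocks 0..2 keep covering the remaining bytes.
	 */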
	err = ext4_ext_remove_space(inode, last_block);

	/* In a multi-transaction truncate, we only make the final
	 * transaction synchronous.
	 */
	if (IS_SYNC(inode))
		handle->h_sync = 1;

out_stop:
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	up_write(&EXT4_I(inode)->i_data_sem);
	ext4_journal_stop(handle);
}
/*
 * ext4_ext_writepage_trans_blocks:
 * calculate the max number of blocks we could modify
 * in order to allocate a new block for an inode
 */
int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
{
	int needed;

	needed = ext4_ext_calc_credits_for_insert(inode, NULL);

	/* the caller wants to allocate num blocks, but note it includes sb */
	needed = needed * num - (num - 1);
#ifdef CONFIG_QUOTA
	needed += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif
	return needed;
}
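/*
 * Worked example (illustrative, assuming one insert costs c credits as
 * returned by ext4_ext_calc_credits_for_insert()): num = 3 yields
 * 3c - 2 credits, because the superblock buffer counted in each
 * per-insert estimate only has to be journalled once for the batch.
 */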
/*
 * preallocate space for a file. This implements ext4's fallocate inode
 * operation, which gets called from the sys_fallocate system call.
 * For block-mapped files, posix_fallocate should fall back to the method
 * of writing zeroes to the required new blocks (the same behavior which is
 * expected for file systems which do not support the fallocate() system
 * call).
 */
long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
{
	handle_t *handle;
	ext4_lblk_t block;
	unsigned long max_blocks;
	ext4_fsblk_t nblocks = 0;
	int ret = 0;
	int ret2 = 0;
	int retries = 0;
	struct buffer_head map_bh;
	unsigned int credits, blkbits = inode->i_blkbits;

	/*
	 * currently supporting (pre)allocate mode for extent-based
	 * files _only_
	 */
	if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
		return -EOPNOTSUPP;

	/* preallocation to directories is currently not supported */
	if (S_ISDIR(inode->i_mode))
		return -ENODEV;

	block = offset >> blkbits;
	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
			- block;
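	/*
	 * Range arithmetic, by example (illustrative numbers): with 4 KiB
	 * blocks (blkbits = 12), offset = 6000 and len = 10000 give
	 * block = 1 and max_blocks = (align(16000) >> 12) - 1 = 4 - 1 = 3,
	 * i.e. blocks 1..3 cover the byte range being preallocated.
	 */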
	/*
	 * credits to insert 1 extent into the extent tree + buffers to be
	 * able to modify 1 super block, 1 block bitmap and 1 group descriptor.
	 */
	credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb) + 3;
	down_write((&EXT4_I(inode)->i_data_sem));
retry:
	while (ret >= 0 && ret < max_blocks) {
		block = block + ret;
		max_blocks = max_blocks - ret;
		handle = ext4_journal_start(inode, credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}

		ret = ext4_ext_get_blocks(handle, inode, block,
					  max_blocks, &map_bh,
					  EXT4_CREATE_UNINITIALIZED_EXT, 0);
		if (ret <= 0) {
			ext4_error(inode->i_sb, "ext4_fallocate",
				   "ext4_ext_get_blocks returned error: "
				   "inode#%lu, block=%u, max_blocks=%lu",
				   inode->i_ino, block, max_blocks);
			ret = -EIO;
			ext4_mark_inode_dirty(handle, inode);
			ret2 = ext4_journal_stop(handle);
			break;
		}
		/* check for wrap through the sign bit/zero here */
		if ((block + ret) < 0 || (block + ret) < block) {
			ret = -EIO;
			ext4_mark_inode_dirty(handle, inode);
			ret2 = ext4_journal_stop(handle);
			break;
		}
		if (buffer_new(&map_bh) && ((block + ret) >
		    (EXT4_BLOCK_ALIGN(i_size_read(inode), blkbits)
		     >> blkbits)))
			nblocks = nblocks + ret;
		/* Update ctime if new blocks get allocated */
		if (nblocks) {
			struct timespec now;

			now = current_fs_time(inode->i_sb);
			if (!timespec_equal(&inode->i_ctime, &now))
				inode->i_ctime = now;
		}

		ext4_mark_inode_dirty(handle, inode);
		ret2 = ext4_journal_stop(handle);
		if (ret2)
			break;
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;

	up_write((&EXT4_I(inode)->i_data_sem));
	/*
	 * Time to update the file size.
	 * Update only when preallocation was requested beyond the file size.
	 */
	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len) > i_size_read(inode)) {
		if (ret > 0) {
			/*
			 * if there was no error, we assume preallocation
			 * succeeded completely
			 */
			mutex_lock(&inode->i_mutex);
			i_size_write(inode, offset + len);
			EXT4_I(inode)->i_disksize = i_size_read(inode);
			mutex_unlock(&inode->i_mutex);
		} else if (ret < 0 && nblocks) {
			/* Handle the partial allocation scenario */
			loff_t newsize;

			mutex_lock(&inode->i_mutex);
			newsize = (nblocks << blkbits) + i_size_read(inode);
			i_size_write(inode, EXT4_BLOCK_ALIGN(newsize, blkbits));
			EXT4_I(inode)->i_disksize = i_size_read(inode);
			mutex_unlock(&inode->i_mutex);
		}
	}
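	/*
	 * Partial-allocation example (illustrative numbers): if i_size was
	 * 8192 and only nblocks = 2 blocks of 4 KiB were preallocated
	 * before the failure, the size grows to
	 * align(8192 + 2 * 4096) = 16384 rather than all the way to
	 * offset + len.
	 */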
	return ret > 0 ? ret2 : ret;
}
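/*
 * Usage sketch (hypothetical userspace caller, not part of this file):
 * ext4_fallocate() is reached via sys_fallocate(), so an application
 * would exercise it roughly like this, preallocating 16 MiB of
 * uninitialized extents:
 *
 *	int fd = open("/mnt/ext4/file", O_WRONLY | O_CREAT, 0644);
 *	if (fd >= 0 && fallocate(fd, 0, 0, 16 << 20) == 0)
 *		close(fd);
 *
 * On success the function returns the journal-stop status (normally 0),
 * which fallocate(2) reports to the caller; a negative return
 * propagates as -errno.
 */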