]> www.pilppa.org Git - linux-2.6-omap-h63xx.git/commitdiff
Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 1 Apr 2009 17:57:49 +0000 (10:57 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 1 Apr 2009 17:57:49 +0000 (10:57 -0700)
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (33 commits)
  ext4: Regularize mount options
  ext4: fix locking typo in mballoc which could cause soft lockup hangs
  ext4: fix typo which causes a memory leak on error path
  jbd2: Update locking comments
  ext4: Rename pa_linear to pa_type
  ext4: add checks of block references for non-extent inodes
  ext4: Check for a valid i_mode when reading the inode from disk
  ext4: Use WRITE_SYNC for commits which are caused by fsync()
  ext4: Add auto_da_alloc mount option
  ext4: Use struct flex_groups to calculate get_orlov_stats()
  ext4: Use atomic_t's in struct flex_groups
  ext4: remove /proc tuning knobs
  ext4: Add sysfs support
  ext4: Track lifetime disk writes
  ext4: Fix discard of inode prealloc space with delayed allocation.
  ext4: Automatically allocate delay allocated blocks on rename
  ext4: Automatically allocate delay allocated blocks on close
  ext4: add EXT4_IOC_ALLOC_DA_BLKS ioctl
  ext4: Simplify delalloc code by removing mpage_da_writepages()
  ext4: Save stack space by removing fake buffer heads
  ...

1  2 
fs/ext4/ext4.h
fs/ext4/inode.c

diff --combined fs/ext4/ext4.h
index 990c9400092414ed6cedeffe724afe103d116efc,a004699e72961be5aaa9940b15a368b1d21c8817..d0f15ef56de1b1b6b325d0a86207e78260b1f40a
   */
  #undef EXT4FS_DEBUG
  
- /*
-  * Define EXT4_RESERVATION to reserve data blocks for expanding files
-  */
- #define EXT4_DEFAULT_RESERVE_BLOCKS   8
- /*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
- #define EXT4_MAX_RESERVE_BLOCKS               1027
- #define EXT4_RESERVE_WINDOW_NOT_ALLOCATED 0
  /*
   * Debug code
   */
@@@ -54,8 -46,6 +46,6 @@@
  #define ext4_debug(f, a...)   do {} while (0)
  #endif
  
- #define EXT4_MULTIBLOCK_ALLOCATOR     1
  /* prefer goal again. length */
  #define EXT4_MB_HINT_MERGE            1
  /* blocks already reserved */
@@@ -180,8 -170,9 +170,9 @@@ struct ext4_group_des
   */
  
  struct flex_groups {
-       __u32 free_inodes;
-       __u32 free_blocks;
+       atomic_t free_inodes;
+       atomic_t free_blocks;
+       atomic_t used_dirs;
  };
  
  #define EXT4_BG_INODE_UNINIT  0x0001 /* Inode table/bitmap not in use */
  #define EXT4_FL_USER_VISIBLE          0x000BDFFF /* User visible flags */
  #define EXT4_FL_USER_MODIFIABLE               0x000B80FF /* User modifiable flags */
  
+ /* Flags that should be inherited by new inodes from their parent. */
+ #define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\
+                          EXT4_SYNC_FL | EXT4_IMMUTABLE_FL | EXT4_APPEND_FL |\
+                          EXT4_NODUMP_FL | EXT4_NOATIME_FL |\
+                          EXT4_NOCOMPR_FL | EXT4_JOURNAL_DATA_FL |\
+                          EXT4_NOTAIL_FL | EXT4_DIRSYNC_FL)
+ /* Flags that are appropriate for regular files (all but dir-specific ones). */
+ #define EXT4_REG_FLMASK (~(EXT4_DIRSYNC_FL | EXT4_TOPDIR_FL))
+ /* Flags that are appropriate for non-directories/regular files. */
+ #define EXT4_OTHER_FLMASK (EXT4_NODUMP_FL | EXT4_NOATIME_FL)
+ /* Mask out flags that are inappropriate for the given type of inode. */
+ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
+ {
+       if (S_ISDIR(mode))
+               return flags;
+       else if (S_ISREG(mode))
+               return flags & EXT4_REG_FLMASK;
+       else
+               return flags & EXT4_OTHER_FLMASK;
+ }
  /*
   * Inode dynamic state flags
   */
  #define EXT4_STATE_NEW                        0x00000002 /* inode is newly created */
  #define EXT4_STATE_XATTR              0x00000004 /* has in-inode xattrs */
  #define EXT4_STATE_NO_EXPAND          0x00000008 /* No space for expansion */
+ #define EXT4_STATE_DA_ALLOC_CLOSE     0x00000010 /* Alloc DA blks on close */
  
  /* Used to pass group descriptor data when online resize is done */
  struct ext4_new_group_input {
@@@ -303,7 -319,9 +319,9 @@@ struct ext4_new_group_data 
  #define EXT4_IOC_GROUP_EXTEND         _IOW('f', 7, unsigned long)
  #define EXT4_IOC_GROUP_ADD            _IOW('f', 8, struct ext4_new_group_input)
  #define EXT4_IOC_MIGRATE              _IO('f', 9)
+  /* note ioctl 10 reserved for an early version of the FIEMAP ioctl */
   /* note ioctl 11 reserved for filesystem-independent FIEMAP ioctl */
+ #define EXT4_IOC_ALLOC_DA_BLKS                _IO('f', 12)
  
  /*
   * ioctl commands in 32 bit emulation
@@@ -531,7 -549,7 +549,7 @@@ do {                                                                              
  #define EXT4_MOUNT_NO_UID32           0x02000  /* Disable 32-bit UIDs */
  #define EXT4_MOUNT_XATTR_USER         0x04000 /* Extended user attributes */
  #define EXT4_MOUNT_POSIX_ACL          0x08000 /* POSIX Access Control Lists */
- #define EXT4_MOUNT_RESERVATION                0x10000 /* Preallocation */
+ #define EXT4_MOUNT_NO_AUTO_DA_ALLOC   0x10000 /* No auto delalloc mapping */
  #define EXT4_MOUNT_BARRIER            0x20000 /* Use block barriers */
  #define EXT4_MOUNT_NOBH                       0x40000 /* No bufferheads */
  #define EXT4_MOUNT_QUOTA              0x80000 /* Some quota option set */
@@@ -666,7 -684,8 +684,8 @@@ struct ext4_super_block 
        __u8    s_log_groups_per_flex;  /* FLEX_BG group size */
        __u8    s_reserved_char_pad2;
        __le16  s_reserved_pad;
-       __u32   s_reserved[162];        /* Padding to the end of the block */
+       __le64  s_kbytes_written;       /* nr of lifetime kilobytes written */
+       __u32   s_reserved[160];        /* Padding to the end of the block */
  };
  
  #ifdef __KERNEL__
@@@ -813,6 -832,12 +832,12 @@@ static inline int ext4_valid_inum(struc
  #define EXT4_DEF_MIN_BATCH_TIME       0
  #define EXT4_DEF_MAX_BATCH_TIME       15000 /* 15ms */
  
+ /*
+  * Minimum number of groups in a flexgroup before we separate out
+  * directories into the first block group of a flexgroup
+  */
+ #define EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME       4
  /*
   * Structure of a directory entry
   */
@@@ -865,24 -890,6 +890,6 @@@ struct ext4_dir_entry_2 
                                         ~EXT4_DIR_ROUND)
  #define EXT4_MAX_REC_LEN              ((1<<16)-1)
  
- static inline unsigned ext4_rec_len_from_disk(__le16 dlen)
- {
-       unsigned len = le16_to_cpu(dlen);
-       if (len == EXT4_MAX_REC_LEN || len == 0)
-               return 1 << 16;
-       return len;
- }
- static inline __le16 ext4_rec_len_to_disk(unsigned len)
- {
-       if (len == (1 << 16))
-               return cpu_to_le16(EXT4_MAX_REC_LEN);
-       else if (len > (1 << 16))
-               BUG();
-       return cpu_to_le16(len);
- }
  /*
   * Hash Tree Directory indexing
   * (c) Daniel Phillips, 2001
@@@ -970,22 -977,6 +977,6 @@@ void ext4_get_group_no_and_offset(struc
  
  extern struct proc_dir_entry *ext4_proc_root;
  
- #ifdef CONFIG_PROC_FS
- extern const struct file_operations ext4_ui_proc_fops;
- #define       EXT4_PROC_HANDLER(name, var)                                    \
- do {                                                                  \
-       proc = proc_create_data(name, mode, sbi->s_proc,                \
-                               &ext4_ui_proc_fops, &sbi->s_##var);     \
-       if (proc == NULL) {                                             \
-               printk(KERN_ERR "EXT4-fs: can't create %s\n", name);    \
-               goto err_out;                                           \
-       }                                                               \
- } while (0)
- #else
- #define EXT4_PROC_HANDLER(name, var)
- #endif
  /*
   * Function prototypes
   */
@@@ -1092,13 -1083,14 +1083,14 @@@ extern int ext4_can_truncate(struct ino
  extern void ext4_truncate(struct inode *);
  extern void ext4_set_inode_flags(struct inode *);
  extern void ext4_get_inode_flags(struct ext4_inode_info *);
+ extern int ext4_alloc_da_blocks(struct inode *inode);
  extern void ext4_set_aops(struct inode *inode);
  extern int ext4_writepage_trans_blocks(struct inode *);
  extern int ext4_meta_trans_blocks(struct inode *, int nrblocks, int idxblocks);
  extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
  extern int ext4_block_truncate_page(handle_t *handle,
                struct address_space *mapping, loff_t from);
 -extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page);
 +extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
  extern qsize_t ext4_get_reserved_space(struct inode *inode);
  
  /* ioctl.c */
@@@ -1107,7 -1099,10 +1099,10 @@@ extern long ext4_compat_ioctl(struct fi
  
  /* migrate.c */
  extern int ext4_ext_migrate(struct inode *);
  /* namei.c */
+ extern unsigned int ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize);
+ extern __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize);
  extern int ext4_orphan_add(handle_t *, struct inode *);
  extern int ext4_orphan_del(handle_t *, struct inode *);
  extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
diff --combined fs/ext4/inode.c
index dd82ff390067482ac31fac5f5a9e3414d45cbcdf,b3fd65d46506e4d96627e7bddeab12edda8e500f..a2e7952bc5f9a6e3f3ed877dde89fb93ef4b4035
@@@ -371,6 -371,34 +371,34 @@@ static int ext4_block_to_path(struct in
        return n;
  }
  
+ static int __ext4_check_blockref(const char *function, struct inode *inode,
+                                unsigned int *p, unsigned int max) {
+       unsigned int maxblocks = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es);
+       unsigned int *bref = p;
+       while (bref < p+max) {
+               if (unlikely(*bref >= maxblocks)) {
+                       ext4_error(inode->i_sb, function,
+                                  "block reference %u >= max (%u) "
+                                  "in inode #%lu, offset=%d",
+                                  *bref, maxblocks,
+                                  inode->i_ino, (int)(bref-p));
+                       return -EIO;
+               }
+               bref++;
+       }
+       return 0;
+ }
+ #define ext4_check_indirect_blockref(inode, bh)                         \
+         __ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,  \
+                             EXT4_ADDR_PER_BLOCK((inode)->i_sb))
+ #define ext4_check_inode_blockref(inode)                                \
+         __ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,   \
+                             EXT4_NDIR_BLOCKS)
  /**
   *    ext4_get_branch - read the chain of indirect blocks leading to data
   *    @inode: inode in question
@@@ -415,9 -443,22 +443,22 @@@ static Indirect *ext4_get_branch(struc
        if (!p->key)
                goto no_block;
        while (--depth) {
-               bh = sb_bread(sb, le32_to_cpu(p->key));
-               if (!bh)
+               bh = sb_getblk(sb, le32_to_cpu(p->key));
+               if (unlikely(!bh))
                        goto failure;
+                   
+               if (!bh_uptodate_or_lock(bh)) {
+                       if (bh_submit_read(bh) < 0) {
+                               put_bh(bh);
+                               goto failure;
+                       }
+                       /* validate block references */
+                       if (ext4_check_indirect_blockref(inode, bh)) {
+                               put_bh(bh);
+                               goto failure;
+                       }
+               }
+               
                add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
                /* Reader: end */
                if (!p->key)
@@@ -459,6 -500,8 +500,8 @@@ static ext4_fsblk_t ext4_find_near(stru
        ext4_fsblk_t bg_start;
        ext4_fsblk_t last_block;
        ext4_grpblk_t colour;
+       ext4_group_t block_group;
+       int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
  
        /* Try to find previous block */
        for (p = ind->p - 1; p >= start; p--) {
         * It is going to be referred to from the inode itself? OK, just put it
         * into the same cylinder group then.
         */
-       bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
+       block_group = ei->i_block_group;
+       if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
+               block_group &= ~(flex_size-1);
+               if (S_ISREG(inode->i_mode))
+                       block_group++;
+       }
+       bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
        last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
  
+       /*
+        * If we are doing delayed allocation, we don't need take
+        * colour into account.
+        */
+       if (test_opt(inode->i_sb, DELALLOC))
+               return bg_start;
        if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
                colour = (current->pid % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
@@@ -1052,9 -1108,16 +1108,16 @@@ static void ext4_da_update_reserve_spac
        /*
         * free those over-booking quota for metadata blocks
         */
        if (mdb_free)
                vfs_dq_release_reservation_block(inode, mdb_free);
+       /*
+        * If we have done all the pending block allocations and if
+        * there aren't any writers on the inode, we can discard the
+        * inode's preallocations.
+        */
+       if (!total && (atomic_read(&inode->i_writecount) == 0))
+               ext4_discard_preallocations(inode);
  }
  
  /*
@@@ -1688,9 -1751,10 +1751,10 @@@ static void ext4_da_page_release_reserv
  
  struct mpage_da_data {
        struct inode *inode;
-       struct buffer_head lbh;                 /* extent of blocks */
+       sector_t b_blocknr;             /* start block number of extent */
+       size_t b_size;                  /* size of extent */
+       unsigned long b_state;          /* state of the extent */
        unsigned long first_page, next_page;    /* extent of pages */
-       get_block_t *get_block;
        struct writeback_control *wbc;
        int io_done;
        int pages_written;
   * @mpd->inode: inode
   * @mpd->first_page: first page of the extent
   * @mpd->next_page: page after the last page of the extent
-  * @mpd->get_block: the filesystem's block mapper function
   *
   * By the time mpage_da_submit_io() is called we expect all blocks
   * to be allocated. this may be wrong if allocation failed.
@@@ -1724,7 -1787,7 +1787,7 @@@ static int mpage_da_submit_io(struct mp
        /*
         * We need to start from the first_page to the next_page - 1
         * to make sure we also write the mapped dirty buffer_heads.
-        * If we look at mpd->lbh.b_blocknr we would only be looking
+        * If we look at mpd->b_blocknr we would only be looking
         * at the currently mapped buffer_heads.
         */
        index = mpd->first_page;
@@@ -1914,68 -1977,111 +1977,111 @@@ static void ext4_print_free_blocks(stru
        return;
  }
  
+ #define               EXT4_DELALLOC_RSVED     1
+ static int ext4_da_get_block_write(struct inode *inode, sector_t iblock,
+                                  struct buffer_head *bh_result, int create)
+ {
+       int ret;
+       unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
+       loff_t disksize = EXT4_I(inode)->i_disksize;
+       handle_t *handle = NULL;
+       handle = ext4_journal_current_handle();
+       BUG_ON(!handle);
+       ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks,
+                                  bh_result, create, 0, EXT4_DELALLOC_RSVED);
+       if (ret <= 0)
+               return ret;
+       bh_result->b_size = (ret << inode->i_blkbits);
+       if (ext4_should_order_data(inode)) {
+               int retval;
+               retval = ext4_jbd2_file_inode(handle, inode);
+               if (retval)
+                       /*
+                        * Failed to add inode for ordered mode. Don't
+                        * update file size
+                        */
+                       return retval;
+       }
+       /*
+        * Update on-disk size along with block allocation we don't
+        * use 'extend_disksize' as size may change within already
+        * allocated block -bzzz
+        */
+       disksize = ((loff_t) iblock + ret) << inode->i_blkbits;
+       if (disksize > i_size_read(inode))
+               disksize = i_size_read(inode);
+       if (disksize > EXT4_I(inode)->i_disksize) {
+               ext4_update_i_disksize(inode, disksize);
+               ret = ext4_mark_inode_dirty(handle, inode);
+               return ret;
+       }
+       return 0;
+ }
  /*
   * mpage_da_map_blocks - go through given space
   *
-  * @mpd->lbh - bh describing space
-  * @mpd->get_block - the filesystem's block mapper function
+  * @mpd - bh describing space
   *
   * The function skips space we know is already mapped to disk blocks.
   *
   */
- static int  mpage_da_map_blocks(struct mpage_da_data *mpd)
+ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
  {
        int err = 0;
        struct buffer_head new;
-       struct buffer_head *lbh = &mpd->lbh;
        sector_t next;
  
        /*
         * We consider only non-mapped and non-allocated blocks
         */
-       if (buffer_mapped(lbh) && !buffer_delay(lbh))
+       if ((mpd->b_state  & (1 << BH_Mapped)) &&
+           !(mpd->b_state & (1 << BH_Delay)))
                return 0;
-       new.b_state = lbh->b_state;
+       new.b_state = mpd->b_state;
        new.b_blocknr = 0;
-       new.b_size = lbh->b_size;
-       next = lbh->b_blocknr;
+       new.b_size = mpd->b_size;
+       next = mpd->b_blocknr;
        /*
         * If we didn't accumulate anything
         * to write simply return
         */
        if (!new.b_size)
                return 0;
-       err = mpd->get_block(mpd->inode, next, &new, 1);
-       if (err) {
  
-               /* If get block returns with error
-                * we simply return. Later writepage
-                * will redirty the page and writepages
-                * will find the dirty page again
+       err = ext4_da_get_block_write(mpd->inode, next, &new, 1);
+       if (err) {
+               /*
+                * If get block returns with error we simply
+                * return. Later writepage will redirty the page and
+                * writepages will find the dirty page again
                 */
                if (err == -EAGAIN)
                        return 0;
  
                if (err == -ENOSPC &&
-                               ext4_count_free_blocks(mpd->inode->i_sb)) {
+                   ext4_count_free_blocks(mpd->inode->i_sb)) {
                        mpd->retval = err;
                        return 0;
                }
  
                /*
-                * get block failure will cause us
-                * to loop in writepages. Because
-                * a_ops->writepage won't be able to
-                * make progress. The page will be redirtied
-                * by writepage and writepages will again
-                * try to write the same.
+                * get block failure will cause us to loop in
+                * writepages, because a_ops->writepage won't be able
+                * to make progress. The page will be redirtied by
+                * writepage and writepages will again try to write
+                * the same.
                 */
                printk(KERN_EMERG "%s block allocation failed for inode %lu "
                                  "at logical offset %llu with max blocks "
                                  "%zd with error %d\n",
                                  __func__, mpd->inode->i_ino,
                                  (unsigned long long)next,
-                                 lbh->b_size >> mpd->inode->i_blkbits, err);
+                                 mpd->b_size >> mpd->inode->i_blkbits, err);
                printk(KERN_EMERG "This should not happen.!! "
                                        "Data will be lost\n");
                if (err == -ENOSPC) {
                }
                /* invalidate all the pages */
                ext4_da_block_invalidatepages(mpd, next,
-                               lbh->b_size >> mpd->inode->i_blkbits);
+                               mpd->b_size >> mpd->inode->i_blkbits);
                return err;
        }
        BUG_ON(new.b_size == 0);
         * If blocks are delayed marked, we need to
         * put actual blocknr and drop delayed bit
         */
-       if (buffer_delay(lbh) || buffer_unwritten(lbh))
+       if ((mpd->b_state & (1 << BH_Delay)) ||
+           (mpd->b_state & (1 << BH_Unwritten)))
                mpage_put_bnr_to_bhs(mpd, next, &new);
  
        return 0;
   * the function is used to collect contig. blocks in same state
   */
  static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
-                                  sector_t logical, struct buffer_head *bh)
+                                  sector_t logical, size_t b_size,
+                                  unsigned long b_state)
  {
        sector_t next;
-       size_t b_size = bh->b_size;
-       struct buffer_head *lbh = &mpd->lbh;
-       int nrblocks = lbh->b_size >> mpd->inode->i_blkbits;
+       int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
  
        /* check if the reserved journal credits might overflow */
        if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
        /*
         * First block in the extent
         */
-       if (lbh->b_size == 0) {
-               lbh->b_blocknr = logical;
-               lbh->b_size = b_size;
-               lbh->b_state = bh->b_state & BH_FLAGS;
+       if (mpd->b_size == 0) {
+               mpd->b_blocknr = logical;
+               mpd->b_size = b_size;
+               mpd->b_state = b_state & BH_FLAGS;
                return;
        }
  
-       next = lbh->b_blocknr + nrblocks;
+       next = mpd->b_blocknr + nrblocks;
        /*
         * Can we merge the block to our big extent?
         */
-       if (logical == next && (bh->b_state & BH_FLAGS) == lbh->b_state) {
-               lbh->b_size += b_size;
+       if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
+               mpd->b_size += b_size;
                return;
        }
  
@@@ -2087,7 -2193,7 +2193,7 @@@ static int __mpage_da_writepage(struct 
  {
        struct mpage_da_data *mpd = data;
        struct inode *inode = mpd->inode;
-       struct buffer_head *bh, *head, fake;
+       struct buffer_head *bh, *head;
        sector_t logical;
  
        if (mpd->io_done) {
                /*
                 * ... and blocks
                 */
-               mpd->lbh.b_size = 0;
-               mpd->lbh.b_state = 0;
-               mpd->lbh.b_blocknr = 0;
+               mpd->b_size = 0;
+               mpd->b_state = 0;
+               mpd->b_blocknr = 0;
        }
  
        mpd->next_page = page->index + 1;
                  (PAGE_CACHE_SHIFT - inode->i_blkbits);
  
        if (!page_has_buffers(page)) {
-               /*
-                * There is no attached buffer heads yet (mmap?)
-                * we treat the page asfull of dirty blocks
-                */
-               bh = &fake;
-               bh->b_size = PAGE_CACHE_SIZE;
-               bh->b_state = 0;
-               set_buffer_dirty(bh);
-               set_buffer_uptodate(bh);
-               mpage_add_bh_to_extent(mpd, logical, bh);
+               mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
+                                      (1 << BH_Dirty) | (1 << BH_Uptodate));
                if (mpd->io_done)
                        return MPAGE_DA_EXTENT_TAIL;
        } else {
                         * with the page in ext4_da_writepage
                         */
                        if (buffer_dirty(bh) &&
-                               (!buffer_mapped(bh) || buffer_delay(bh))) {
-                               mpage_add_bh_to_extent(mpd, logical, bh);
+                           (!buffer_mapped(bh) || buffer_delay(bh))) {
+                               mpage_add_bh_to_extent(mpd, logical,
+                                                      bh->b_size,
+                                                      bh->b_state);
                                if (mpd->io_done)
                                        return MPAGE_DA_EXTENT_TAIL;
                        } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
                                 * unmapped buffer_head later we need to
                                 * use the b_state flag of that buffer_head.
                                 */
-                               if (mpd->lbh.b_size == 0)
-                                       mpd->lbh.b_state =
-                                               bh->b_state & BH_FLAGS;
+                               if (mpd->b_size == 0)
+                                       mpd->b_state = bh->b_state & BH_FLAGS;
                        }
                        logical++;
                } while ((bh = bh->b_this_page) != head);
        return 0;
  }
  
- /*
-  * mpage_da_writepages - walk the list of dirty pages of the given
-  * address space, allocates non-allocated blocks, maps newly-allocated
-  * blocks to existing bhs and issue IO them
-  *
-  * @mapping: address space structure to write
-  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
-  * @get_block: the filesystem's block mapper function.
-  *
-  * This is a library function, which implements the writepages()
-  * address_space_operation.
-  */
- static int mpage_da_writepages(struct address_space *mapping,
-                              struct writeback_control *wbc,
-                              struct mpage_da_data *mpd)
- {
-       int ret;
-       if (!mpd->get_block)
-               return generic_writepages(mapping, wbc);
-       mpd->lbh.b_size = 0;
-       mpd->lbh.b_state = 0;
-       mpd->lbh.b_blocknr = 0;
-       mpd->first_page = 0;
-       mpd->next_page = 0;
-       mpd->io_done = 0;
-       mpd->pages_written = 0;
-       mpd->retval = 0;
-       ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, mpd);
-       /*
-        * Handle last extent of pages
-        */
-       if (!mpd->io_done && mpd->next_page != mpd->first_page) {
-               if (mpage_da_map_blocks(mpd) == 0)
-                       mpage_da_submit_io(mpd);
-               mpd->io_done = 1;
-               ret = MPAGE_DA_EXTENT_TAIL;
-       }
-       wbc->nr_to_write -= mpd->pages_written;
-       return ret;
- }
  /*
   * this is a special callback for ->write_begin() only
   * it's intention is to return mapped block or reserve space
@@@ -2274,51 -2328,6 +2328,6 @@@ static int ext4_da_get_block_prep(struc
  
        return ret;
  }
- #define               EXT4_DELALLOC_RSVED     1
- static int ext4_da_get_block_write(struct inode *inode, sector_t iblock,
-                                  struct buffer_head *bh_result, int create)
- {
-       int ret;
-       unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
-       loff_t disksize = EXT4_I(inode)->i_disksize;
-       handle_t *handle = NULL;
-       handle = ext4_journal_current_handle();
-       BUG_ON(!handle);
-       ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks,
-                       bh_result, create, 0, EXT4_DELALLOC_RSVED);
-       if (ret > 0) {
-               bh_result->b_size = (ret << inode->i_blkbits);
-               if (ext4_should_order_data(inode)) {
-                       int retval;
-                       retval = ext4_jbd2_file_inode(handle, inode);
-                       if (retval)
-                               /*
-                                * Failed to add inode for ordered
-                                * mode. Don't update file size
-                                */
-                               return retval;
-               }
-               /*
-                * Update on-disk size along with block allocation
-                * we don't use 'extend_disksize' as size may change
-                * within already allocated block -bzzz
-                */
-               disksize = ((loff_t) iblock + ret) << inode->i_blkbits;
-               if (disksize > i_size_read(inode))
-                       disksize = i_size_read(inode);
-               if (disksize > EXT4_I(inode)->i_disksize) {
-                       ext4_update_i_disksize(inode, disksize);
-                       ret = ext4_mark_inode_dirty(handle, inode);
-                       return ret;
-               }
-               ret = 0;
-       }
-       return ret;
- }
  
  static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
  {
@@@ -2569,8 -2578,38 +2578,38 @@@ retry
                        dump_stack();
                        goto out_writepages;
                }
-               mpd.get_block = ext4_da_get_block_write;
-               ret = mpage_da_writepages(mapping, wbc, &mpd);
+               /*
+                * Now call __mpage_da_writepage to find the next
+                * contiguous region of logical blocks that need
+                * blocks to be allocated by ext4.  We don't actually
+                * submit the blocks for I/O here, even though
+                * write_cache_pages thinks it will, and will set the
+                * pages as clean for write before calling
+                * __mpage_da_writepage().
+                */
+               mpd.b_size = 0;
+               mpd.b_state = 0;
+               mpd.b_blocknr = 0;
+               mpd.first_page = 0;
+               mpd.next_page = 0;
+               mpd.io_done = 0;
+               mpd.pages_written = 0;
+               mpd.retval = 0;
+               ret = write_cache_pages(mapping, wbc, __mpage_da_writepage,
+                                       &mpd);
+               /*
+        * If we have a contiguous extent of pages and we
+                * haven't done the I/O yet, map the blocks and submit
+                * them for I/O.
+                */
+               if (!mpd.io_done && mpd.next_page != mpd.first_page) {
+                       if (mpage_da_map_blocks(&mpd) == 0)
+                               mpage_da_submit_io(&mpd);
+                       mpd.io_done = 1;
+                       ret = MPAGE_DA_EXTENT_TAIL;
+               }
+               wbc->nr_to_write -= mpd.pages_written;
  
                ext4_journal_stop(handle);
  
        return;
  }
  
+ /*
+  * Force all delayed allocation blocks to be allocated for a given inode.
+  */
+ int ext4_alloc_da_blocks(struct inode *inode)
+ {
+       if (!EXT4_I(inode)->i_reserved_data_blocks &&
+           !EXT4_I(inode)->i_reserved_meta_blocks)
+               return 0;
+       /*
+        * We do something simple for now.  The filemap_flush() will
+        * also start triggering a write of the data blocks, which is
+        * not strictly speaking necessary (and for users of
+        * laptop_mode, not even desirable).  However, to do otherwise
+        * would require replicating code paths in:
+        * 
+        * ext4_da_writepages() ->
+        *    write_cache_pages() ---> (via passed in callback function)
+        *        __mpage_da_writepage() -->
+        *           mpage_add_bh_to_extent()
+        *           mpage_da_map_blocks()
+        *
+        * The problem is that write_cache_pages(), located in
+        * mm/page-writeback.c, marks pages clean in preparation for
+        * doing I/O, which is not desirable if we're not planning on
+        * doing I/O at all.
+        *
+        * We could call write_cache_pages(), and then redirty all of
+        * the pages by calling redirty_page_for_writeback() but that
+        * would be ugly in the extreme.  So instead we would need to
+        * replicate parts of the code in the above functions,
+        * simplifying them because we wouldn't actually intend to
+        * write out the pages, but rather only collect contiguous
+        * logical block extents, call the multi-block allocator, and
+        * then update the buffer heads with the block allocations.
+        * 
+        * For now, though, we'll cheat by calling filemap_flush(),
+        * which will map the blocks, and start the I/O, but not
+        * actually wait for the I/O to complete.
+        */
+       return filemap_flush(inode->i_mapping);
+ }
  
  /*
   * bmap() is special.  It gets used by applications such as lilo and by
@@@ -3868,6 -3949,9 +3949,9 @@@ void ext4_truncate(struct inode *inode
        if (!ext4_can_truncate(inode))
                return;
  
+       if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
+               ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
                ext4_ext_truncate(inode);
                return;
@@@ -4110,12 -4194,7 +4194,7 @@@ make_io
                        unsigned num;
  
                        table = ext4_inode_table(sb, gdp);
-                       /* Make sure s_inode_readahead_blks is a power of 2 */
-                       while (EXT4_SB(sb)->s_inode_readahead_blks &
-                              (EXT4_SB(sb)->s_inode_readahead_blks-1))
-                               EXT4_SB(sb)->s_inode_readahead_blks = 
-                                  (EXT4_SB(sb)->s_inode_readahead_blks &
-                                   (EXT4_SB(sb)->s_inode_readahead_blks-1));
+                       /* s_inode_readahead_blks is always a power of 2 */
                        b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
                        if (table > b)
                                b = table;
@@@ -4287,6 -4366,7 +4366,7 @@@ struct inode *ext4_iget(struct super_bl
        ei->i_disksize = inode->i_size;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);
        ei->i_block_group = iloc.block_group;
+       ei->i_last_alloc_group = ~0;
        /*
         * NOTE! The in-memory inode i_data array is in little-endian order
         * even on big-endian machines: we do NOT byteswap the block numbers!
                        (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
        }
  
+       if (ei->i_flags & EXT4_EXTENTS_FL) {
+               /* Validate extent which is part of inode */
+               ret = ext4_ext_check_inode(inode);
+       } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+                  (S_ISLNK(inode->i_mode) &&
+                   !ext4_inode_is_fast_symlink(inode))) {
+               /* Validate block references which are part of inode */
+               ret = ext4_check_inode_blockref(inode);
+       }
+       if (ret) {
+               brelse(bh);
+               goto bad_inode;
+       }
        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &ext4_file_inode_operations;
                inode->i_fop = &ext4_file_operations;
                        inode->i_op = &ext4_symlink_inode_operations;
                        ext4_set_aops(inode);
                }
-       } else {
+       } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+             S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
                inode->i_op = &ext4_special_inode_operations;
                if (raw_inode->i_block[0])
                        init_special_inode(inode, inode->i_mode,
                else
                        init_special_inode(inode, inode->i_mode,
                           new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
+       } else {
+               brelse(bh);
+               ret = -EIO;
+               ext4_error(inode->i_sb, __func__, 
+                          "bogus i_mode (%o) for inode=%lu",
+                          inode->i_mode, inode->i_ino);
+               goto bad_inode;
        }
        brelse(iloc.bh);
        ext4_set_inode_flags(inode);
@@@ -5146,9 -5248,8 +5248,9 @@@ static int ext4_bh_unmapped(handle_t *h
        return !buffer_mapped(bh);
  }
  
 -int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
 +int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
  {
 +      struct page *page = vmf->page;
        loff_t size;
        unsigned long len;
        int ret = -EINVAL;
                goto out_unlock;
        ret = 0;
  out_unlock:
 +      if (ret)
 +              ret = VM_FAULT_SIGBUS;
        up_read(&inode->i_alloc_sem);
        return ret;
  }