ext4: Retry block allocation if we have free blocks left
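
Delayed allocation defers the actual block allocation from write time to
writeout time, so writepages can hit -ENOSPC even while the filesystem
still reports free blocks (for instance, blocks freed in transactions
that have not yet committed).  The changes below make the delalloc paths
retry instead of failing outright: ext4_da_reserve_space() now claims its
reservation through ext4_claim_free_blocks() against a new
s_dirtyblocks_counter and retries after ext4_should_retry_alloc();
mpage_da_map_blocks() reports -ENOSPC back through mpd->retval so that
ext4_da_writepages() can force a journal commit and loop again; and only
when allocation fails with no free blocks left are the affected pages
invalidated, with a loud diagnostic.  The write paths additionally fall
back to non-delayed allocation via ext4_nonda_switch() when free space
runs close to the watermark.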
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index d1906d9a22de0b0f7c04deeecc4d7f10ebb72c0a..f97b3478eb894c4fcdc075e9bf4308e27e5f31dd 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -190,7 +190,7 @@ static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
 /*
  * Called at the last iput() if i_nlink is zero.
  */
-void ext4_delete_inode (struct inode * inode)
+void ext4_delete_inode(struct inode *inode)
 {
        handle_t *handle;
        int err;
@@ -330,11 +330,11 @@ static int ext4_block_to_path(struct inode *inode,
        int final = 0;
 
        if (i_block < 0) {
-               ext4_warning (inode->i_sb, "ext4_block_to_path", "block < 0");
+               ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
        } else if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
-       } else if ( (i_block -= direct_blocks) < indirect_blocks) {
+       } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = EXT4_IND_BLOCK;
                offsets[n++] = i_block;
                final = ptrs;
@@ -400,14 +400,14 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
 
        *err = 0;
        /* i_data is not going away, no lock needed */
-       add_chain (chain, NULL, EXT4_I(inode)->i_data + *offsets);
+       add_chain(chain, NULL, EXT4_I(inode)->i_data + *offsets);
        if (!p->key)
                goto no_block;
        while (--depth) {
                bh = sb_bread(sb, le32_to_cpu(p->key));
                if (!bh)
                        goto failure;
-               add_chain(++p, bh, (__le32*)bh->b_data + *++offsets);
+               add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
                /* Reader: end */
                if (!p->key)
                        goto no_block;
@@ -443,7 +443,7 @@ no_block:
 static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
 {
        struct ext4_inode_info *ei = EXT4_I(inode);
-       __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
+       __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
        __le32 *p;
        ext4_fsblk_t bg_start;
        ext4_fsblk_t last_block;
@@ -630,7 +630,7 @@ allocated:
        *err = 0;
        return ret;
 failed_out:
-       for (i = 0; i <index; i++)
+       for (i = 0; i < index; i++)
                ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
        return ret;
 }
@@ -703,7 +703,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                branch[n].p = (__le32 *) bh->b_data + offsets[n];
                branch[n].key = cpu_to_le32(new_blocks[n]);
                *branch[n].p = branch[n].key;
-               if ( n == indirect_blks) {
+               if (n == indirect_blks) {
                        current_block = new_blocks[n];
                        /*
                         * End of chain, update the last new metablock of
@@ -730,7 +730,7 @@ failed:
                BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
                ext4_journal_forget(handle, branch[i].bh);
        }
-       for (i = 0; i <indirect_blks; i++)
+       for (i = 0; i < indirect_blks; i++)
                ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
 
        ext4_free_blocks(handle, inode, new_blocks[i], num, 0);
@@ -783,7 +783,7 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
        if (num == 0 && blks > 1) {
                current_block = le32_to_cpu(where->key) + 1;
                for (i = 1; i < blks; i++)
-                       *(where->p + i ) = cpu_to_le32(current_block++);
+                       *(where->p + i) = cpu_to_le32(current_block++);
        }
 
        /*
@@ -1030,19 +1030,20 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
        BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
        mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
 
-       /* Account for allocated meta_blocks */
-       mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
+       if (mdb_free) {
+               /* Account for allocated meta_blocks */
+               mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
 
-       /* update fs free blocks counter for truncate case */
-       percpu_counter_add(&sbi->s_freeblocks_counter, mdb_free);
+               /* update fs dirty blocks counter */
+               percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
+               EXT4_I(inode)->i_allocated_meta_blocks = 0;
+               EXT4_I(inode)->i_reserved_meta_blocks = mdb;
+       }
 
        /* update per-inode reservations */
        BUG_ON(used  > EXT4_I(inode)->i_reserved_data_blocks);
        EXT4_I(inode)->i_reserved_data_blocks -= used;
 
-       BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
-       EXT4_I(inode)->i_reserved_meta_blocks = mdb;
-       EXT4_I(inode)->i_allocated_meta_blocks = 0;
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 }
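
For readers tracking the accounting rework in this hunk, here is a
minimal userspace model of the new bookkeeping.  The names are
illustrative stand-ins, with "dirty" modelling the new
sbi->s_dirtyblocks_counter (blocks reserved but not yet allocated on
disk); this is a sketch of my reading of the hunk, not the kernel API:

	#include <stdio.h>

	struct rsv {
		long reserved_data;	/* i_reserved_data_blocks */
		long reserved_meta;	/* i_reserved_meta_blocks */
		long allocated_meta;	/* i_allocated_meta_blocks */
		long dirty;		/* fs-wide reserved-but-unallocated blocks */
	};

	static void update_reserve_space(struct rsv *r, long used, long mdb)
	{
		long mdb_free = r->reserved_meta - mdb;	/* over-reserved metadata */

		if (mdb_free) {
			/* metadata blocks already allocated cannot be given back */
			mdb_free -= r->allocated_meta;
			/* return the surplus to the dirty-block pool */
			r->dirty -= mdb_free;
			r->allocated_meta = 0;
			r->reserved_meta = mdb;
		}
		/* the blocks just written stop being reserved */
		r->reserved_data -= used;
	}

	int main(void)
	{
		struct rsv r = { 8, 3, 1, 11 };

		update_reserve_space(&r, 4, 1);	/* wrote 4 data blocks, 1 meta still needed */
		printf("data=%ld meta=%ld dirty=%ld\n",
		       r.reserved_data, r.reserved_meta, r.dirty);
		return 0;
	}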
 
@@ -1241,7 +1242,7 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
                        BUFFER_TRACE(bh, "call get_create_access");
                        fatal = ext4_journal_get_create_access(handle, bh);
                        if (!fatal && !buffer_uptodate(bh)) {
-                               memset(bh->b_data,0,inode->i_sb->s_blocksize);
+                               memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                                set_buffer_uptodate(bh);
                        }
                        unlock_buffer(bh);
@@ -1266,7 +1267,7 @@ err:
 struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
                               ext4_lblk_t block, int create, int *err)
 {
-       struct buffer_head * bh;
+       struct buffer_head *bh;
 
        bh = ext4_getblk(handle, inode, block, create, err);
        if (!bh)
@@ -1282,13 +1283,13 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
        return NULL;
 }
 
-static int walk_page_buffers(  handle_t *handle,
-                               struct buffer_head *head,
-                               unsigned from,
-                               unsigned to,
-                               int *partial,
-                               int (*fn)(      handle_t *handle,
-                                               struct buffer_head *bh))
+static int walk_page_buffers(handle_t *handle,
+                            struct buffer_head *head,
+                            unsigned from,
+                            unsigned to,
+                            int *partial,
+                            int (*fn)(handle_t *handle,
+                                      struct buffer_head *bh))
 {
        struct buffer_head *bh;
        unsigned block_start, block_end;
@@ -1296,9 +1297,9 @@ static int walk_page_buffers(     handle_t *handle,
        int err, ret = 0;
        struct buffer_head *next;
 
-       for (   bh = head, block_start = 0;
-               ret == 0 && (bh != head || !block_start);
-               block_start = block_end, bh = next)
+       for (bh = head, block_start = 0;
+            ret == 0 && (bh != head || !block_start);
+            block_start = block_end, bh = next)
        {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
@@ -1351,23 +1352,23 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata)
 {
-       struct inode *inode = mapping->host;
+       struct inode *inode = mapping->host;
        int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
        handle_t *handle;
        int retries = 0;
-       struct page *page;
+       struct page *page;
        pgoff_t index;
-       unsigned from, to;
+       unsigned from, to;
 
        index = pos >> PAGE_CACHE_SHIFT;
-       from = pos & (PAGE_CACHE_SIZE - 1);
-       to = from + len;
+       from = pos & (PAGE_CACHE_SIZE - 1);
+       to = from + len;
 
 retry:
-       handle = ext4_journal_start(inode, needed_blocks);
-       if (IS_ERR(handle)) {
-               ret = PTR_ERR(handle);
-               goto out;
+       handle = ext4_journal_start(inode, needed_blocks);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               goto out;
        }
 
        page = __grab_cache_page(mapping, index);
@@ -1387,9 +1388,9 @@ retry:
        }
 
        if (ret) {
-               unlock_page(page);
+               unlock_page(page);
                ext4_journal_stop(handle);
-               page_cache_release(page);
+               page_cache_release(page);
        }
 
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -1521,6 +1522,7 @@ static int ext4_journalled_write_end(struct file *file,
 
 static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
 {
+       int retries = 0;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        unsigned long md_needed, mdblocks, total = 0;
 
@@ -1529,6 +1531,7 @@ static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
         * in order to allocate nrblocks
         * worse case is one extent per block
         */
+repeat:
        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
        total = EXT4_I(inode)->i_reserved_data_blocks + nrblocks;
        mdblocks = ext4_calc_metadata_amount(inode, total);
@@ -1537,13 +1540,14 @@ static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
        md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
        total = md_needed + nrblocks;
 
-       if (ext4_has_free_blocks(sbi, total) < total) {
+       if (ext4_claim_free_blocks(sbi, total)) {
                spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+               if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
+                       yield();
+                       goto repeat;
+               }
                return -ENOSPC;
        }
-       /* reduce fs free blocks counter */
-       percpu_counter_sub(&sbi->s_freeblocks_counter, total);
-
        EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
        EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;
 
@@ -1585,8 +1589,8 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
 
        release = to_free + mdb_free;
 
-       /* update fs free blocks counter for truncate case */
-       percpu_counter_add(&sbi->s_freeblocks_counter, release);
+       /* update fs dirty blocks counter for truncate case */
+       percpu_counter_sub(&sbi->s_dirtyblocks_counter, release);
 
        /* update per-inode reservations */
        BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks);
@@ -1630,6 +1634,7 @@ struct mpage_da_data {
        struct writeback_control *wbc;
        int io_done;
        long pages_written;
+       int retval;
 };
 
 /*
@@ -1783,6 +1788,57 @@ static inline void __unmap_underlying_blocks(struct inode *inode,
                unmap_underlying_metadata(bdev, bh->b_blocknr + i);
 }
 
+static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
+                                       sector_t logical, long blk_cnt)
+{
+       int nr_pages, i;
+       pgoff_t index, end;
+       struct pagevec pvec;
+       struct inode *inode = mpd->inode;
+       struct address_space *mapping = inode->i_mapping;
+
+       index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       end   = (logical + blk_cnt - 1) >>
+                               (PAGE_CACHE_SHIFT - inode->i_blkbits);
+       while (index <= end) {
+               nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
+               if (nr_pages == 0)
+                       break;
+               for (i = 0; i < nr_pages; i++) {
+                       struct page *page = pvec.pages[i];
+                       index = page->index;
+                       if (index > end)
+                               break;
+                       index++;
+
+                       BUG_ON(!PageLocked(page));
+                       BUG_ON(PageWriteback(page));
+                       block_invalidatepage(page, 0);
+                       ClearPageUptodate(page);
+                       unlock_page(page);
+               }
+       }
+       return;
+}
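
A note on the helper added above: ext4_da_block_invalidatepages()
assumes the caller already holds the lock on every page in the range
(hence the BUG_ON asserts), walks the range one pagevec at a time, and
invalidates each page and clears its uptodate flag, so a failed delalloc
writeout does not leave behind pages that look clean and valid but were
never written.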
+
+static void ext4_print_free_blocks(struct inode *inode)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+       printk(KERN_EMERG "Total free blocks count %lld\n",
+                       ext4_count_free_blocks(inode->i_sb));
+       printk(KERN_EMERG "Free/Dirty block details\n");
+       printk(KERN_EMERG "free_blocks=%lld\n",
+                       percpu_counter_sum(&sbi->s_freeblocks_counter));
+       printk(KERN_EMERG "dirty_blocks=%lld\n",
+                       percpu_counter_sum(&sbi->s_dirtyblocks_counter));
+       printk(KERN_EMERG "Block reservation details\n");
+       printk(KERN_EMERG "i_reserved_data_blocks=%lu\n",
+                       EXT4_I(inode)->i_reserved_data_blocks);
+       printk(KERN_EMERG "i_reserved_meta_blocks=%lu\n",
+                       EXT4_I(inode)->i_reserved_meta_blocks);
+       return;
+}
+
 /*
  * mpage_da_map_blocks - go through given space
  *
@@ -1792,32 +1848,69 @@ static inline void __unmap_underlying_blocks(struct inode *inode,
  * The function skips space we know is already mapped to disk blocks.
  *
  */
-static void mpage_da_map_blocks(struct mpage_da_data *mpd)
+static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 {
        int err = 0;
-       struct buffer_head *lbh = &mpd->lbh;
-       sector_t next = lbh->b_blocknr;
        struct buffer_head new;
+       struct buffer_head *lbh = &mpd->lbh;
+       sector_t next;
 
        /*
         * We consider only non-mapped and non-allocated blocks
         */
        if (buffer_mapped(lbh) && !buffer_delay(lbh))
-               return;
-
+               return 0;
        new.b_state = lbh->b_state;
        new.b_blocknr = 0;
        new.b_size = lbh->b_size;
-
+       next = lbh->b_blocknr;
        /*
         * If we didn't accumulate anything
         * to write simply return
         */
        if (!new.b_size)
-               return;
+               return 0;
        err = mpd->get_block(mpd->inode, next, &new, 1);
-       if (err)
-               return;
+       if (err) {
+
+               /*
+                * If get_block returns an error, we simply return.
+                * writepage will later redirty the page and
+                * writepages will find the dirty page again.
+                */
+               if (err == -EAGAIN)
+                       return 0;
+
+               if (err == -ENOSPC &&
+                               ext4_count_free_blocks(mpd->inode->i_sb)) {
+                       mpd->retval = err;
+                       return 0;
+               }
+
+               /*
+                * Any other get_block failure would make us loop in
+                * writepages: a_ops->writepage cannot make progress,
+                * so the page would be redirtied by writepage and
+                * writepages would try to write it again.
+                */
+               printk(KERN_EMERG "%s block allocation failed for inode %lu "
+                                 "at logical offset %llu with max blocks "
+                                 "%zd with error %d\n",
+                                 __func__, mpd->inode->i_ino,
+                                 (unsigned long long)next,
+                                 lbh->b_size >> mpd->inode->i_blkbits, err);
+               printk(KERN_EMERG "This should not happen!! "
+                                 "Data will be lost\n");
+               if (err == -ENOSPC)
+                       ext4_print_free_blocks(mpd->inode);
+               /* invalidate all the pages */
+               ext4_da_block_invalidatepages(mpd, next,
+                               lbh->b_size >> mpd->inode->i_blkbits);
+               return err;
+       }
        BUG_ON(new.b_size == 0);
 
        if (buffer_new(&new))
@@ -1830,7 +1923,7 @@ static void mpage_da_map_blocks(struct mpage_da_data *mpd)
        if (buffer_delay(lbh) || buffer_unwritten(lbh))
                mpage_put_bnr_to_bhs(mpd, next, &new);
 
-       return;
+       return 0;
 }
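
Taken together, the error handling above is a three-way policy.  A
compilable restatement for readers skimming the hunk (illustrative, not
kernel code):

	#include <errno.h>
	#include <stdio.h>

	/* 0 means "leave the pages dirty and let writepages retry";
	 * non-zero means "give up and invalidate the pages". */
	static int map_blocks_policy(int err, long free_blocks_left)
	{
		if (err == -EAGAIN)
			return 0;	/* transient; retry later */
		if (err == -ENOSPC && free_blocks_left)
			return 0;	/* record it; force a commit and retry */
		return err;		/* hard failure: data will be lost */
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       map_blocks_policy(-EAGAIN, 0),
		       map_blocks_policy(-ENOSPC, 100),
		       map_blocks_policy(-ENOSPC, 0));	/* 0 0 -28 on Linux */
		return 0;
	}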
 
 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
@@ -1899,8 +1992,8 @@ flush_it:
         * We couldn't merge the block to our extent, so we
         * need to flush current  extent and start new one
         */
-       mpage_da_map_blocks(mpd);
-       mpage_da_submit_io(mpd);
+       if (mpage_da_map_blocks(mpd) == 0)
+               mpage_da_submit_io(mpd);
        mpd->io_done = 1;
        return;
 }
@@ -1942,8 +2035,8 @@ static int __mpage_da_writepage(struct page *page,
                 * and start IO on them using writepage()
                 */
                if (mpd->next_page != mpd->first_page) {
-                       mpage_da_map_blocks(mpd);
-                       mpage_da_submit_io(mpd);
+                       if (mpage_da_map_blocks(mpd) == 0)
+                               mpage_da_submit_io(mpd);
                        /*
                         * skip rest of the page in the page_vec
                         */
@@ -2018,39 +2111,36 @@ static int __mpage_da_writepage(struct page *page,
  */
 static int mpage_da_writepages(struct address_space *mapping,
                               struct writeback_control *wbc,
-                              get_block_t get_block)
+                              struct mpage_da_data *mpd)
 {
-       struct mpage_da_data mpd;
        long to_write;
        int ret;
 
-       if (!get_block)
+       if (!mpd->get_block)
                return generic_writepages(mapping, wbc);
 
-       mpd.wbc = wbc;
-       mpd.inode = mapping->host;
-       mpd.lbh.b_size = 0;
-       mpd.lbh.b_state = 0;
-       mpd.lbh.b_blocknr = 0;
-       mpd.first_page = 0;
-       mpd.next_page = 0;
-       mpd.get_block = get_block;
-       mpd.io_done = 0;
-       mpd.pages_written = 0;
+       mpd->lbh.b_size = 0;
+       mpd->lbh.b_state = 0;
+       mpd->lbh.b_blocknr = 0;
+       mpd->first_page = 0;
+       mpd->next_page = 0;
+       mpd->io_done = 0;
+       mpd->pages_written = 0;
+       mpd->retval = 0;
 
        to_write = wbc->nr_to_write;
 
-       ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, &mpd);
+       ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, mpd);
 
        /*
         * Handle last extent of pages
         */
-       if (!mpd.io_done && mpd.next_page != mpd.first_page) {
-               mpage_da_map_blocks(&mpd);
-               mpage_da_submit_io(&mpd);
+       if (!mpd->io_done && mpd->next_page != mpd->first_page) {
+               if (mpage_da_map_blocks(mpd) == 0)
+                       mpage_da_submit_io(mpd);
        }
 
-       wbc->nr_to_write = to_write - mpd.pages_written;
+       wbc->nr_to_write = to_write - mpd->pages_written;
        return ret;
 }
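
One structural point in this hunk: mpage_da_writepages() now works on a
struct mpage_da_data owned by its caller instead of a stack-local copy.
That is what lets mpd.retval, set when block allocation hits -ENOSPC,
survive the call so that ext4_da_writepages() can react to it below.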
 
@@ -2103,18 +2193,24 @@ static int ext4_da_get_block_write(struct inode *inode, sector_t iblock,
        handle_t *handle = NULL;
 
        handle = ext4_journal_current_handle();
-       if (!handle) {
-               ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks,
-                                  bh_result, 0, 0, 0);
-               BUG_ON(!ret);
-       } else {
-               ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks,
-                                  bh_result, create, 0, EXT4_DELALLOC_RSVED);
-       }
-
+       BUG_ON(!handle);
+       ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks,
+                       bh_result, create, 0, EXT4_DELALLOC_RSVED);
        if (ret > 0) {
+
                bh_result->b_size = (ret << inode->i_blkbits);
 
+               if (ext4_should_order_data(inode)) {
+                       int retval;
+                       retval = ext4_jbd2_file_inode(handle, inode);
+                       if (retval)
+                               /*
+                                * Failed to add inode for ordered
+                                * mode. Don't update file size
+                                */
+                               return retval;
+               }
+
                /*
                 * Update on-disk size along with block allocation
                 * we don't use 'extend_disksize' as size may change
@@ -2282,13 +2378,13 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
 static int ext4_da_writepages(struct address_space *mapping,
                              struct writeback_control *wbc)
 {
-       struct inode *inode = mapping->host;
        handle_t *handle = NULL;
-       int needed_blocks;
-       int ret = 0;
-       long to_write;
        loff_t range_start = 0;
-       long pages_skipped = 0;
+       struct mpage_da_data mpd;
+       struct inode *inode = mapping->host;
+       int needed_blocks, ret = 0, nr_to_writebump = 0;
+       long to_write, pages_skipped = 0;
+       struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
 
        /*
         * No pages to write? This is mainly a kludge to avoid starting
@@ -2297,6 +2393,16 @@ static int ext4_da_writepages(struct address_space *mapping,
         */
        if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
                return 0;
+       /*
+        * Make sure nr_to_write is >= sbi->s_mb_stream_request.
+        * This makes sure that blocks for small files are allocated
+        * in a single attempt, so that small files end up less
+        * fragmented.
+        */
+       if (wbc->nr_to_write < sbi->s_mb_stream_request) {
+               nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
+               wbc->nr_to_write = sbi->s_mb_stream_request;
+       }
 
        if (!wbc->range_cyclic)
                /*
@@ -2308,6 +2414,9 @@ static int ext4_da_writepages(struct address_space *mapping,
        range_start =  wbc->range_start;
        pages_skipped = wbc->pages_skipped;
 
+       mpd.wbc = wbc;
+       mpd.inode = mapping->host;
+
 restart_loop:
        to_write = wbc->nr_to_write;
        while (!ret && to_write > 0) {
@@ -2331,23 +2440,17 @@ restart_loop:
                        dump_stack();
                        goto out_writepages;
                }
-               if (ext4_should_order_data(inode)) {
-                       /*
-                        * With ordered mode we need to add
-                        * the inode to the journal handl
-                        * when we do block allocation.
-                        */
-                       ret = ext4_jbd2_file_inode(handle, inode);
-                       if (ret) {
-                               ext4_journal_stop(handle);
-                               goto out_writepages;
-                       }
-               }
-
                to_write -= wbc->nr_to_write;
-               ret = mpage_da_writepages(mapping, wbc,
-                                         ext4_da_get_block_write);
+
+               mpd.get_block = ext4_da_get_block_write;
+               ret = mpage_da_writepages(mapping, wbc, &mpd);
+
                ext4_journal_stop(handle);
+
+               if (mpd.retval == -ENOSPC)
+                       jbd2_journal_force_commit_nested(sbi->s_journal);
+
+               /* reset the retry count */
                if (ret == MPAGE_DA_EXTENT_TAIL) {
                        /*
                         * got one extent now try with
@@ -2377,11 +2480,38 @@ restart_loop:
        }
 
 out_writepages:
-       wbc->nr_to_write = to_write;
+       wbc->nr_to_write = to_write - nr_to_writebump;
        wbc->range_start = range_start;
        return ret;
 }
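
Two caller-side details in this hunk: on -ENOSPC the loop forces a
commit of the running transaction with
jbd2_journal_force_commit_nested(), on the theory that the commit
releases blocks freed in not-yet-committed transactions, before trying
again; and since nr_to_write may have been bumped up to
s_mb_stream_request on entry, the bump is subtracted back out at
out_writepages so the caller's writeback accounting stays consistent.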
 
+#define FALL_BACK_TO_NONDELALLOC 1
+static int ext4_nonda_switch(struct super_block *sb)
+{
+       s64 free_blocks, dirty_blocks;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       /*
+        * Switch to non-delalloc mode if we are running low on free
+        * blocks.  The free-block accounting via percpu counters can
+        * be slightly off, since up to FBC_BATCH updates may sit on
+        * each CPU before reaching the global counter.  Delalloc
+        * needs accurate free-block accounting, so switch to
+        * non-delalloc when we are near the error range.
+        */
+       free_blocks  = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+       dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyblocks_counter);
+       if (2 * free_blocks < 3 * dirty_blocks ||
+               free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
+               /*
+                * free block count is less than 150% of dirty blocks,
+                * or free blocks are below the watermark
+                */
+               return 1;
+       }
+       return 0;
+}
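
The fallback threshold is easier to see with numbers.  A self-contained
model of the test in ext4_nonda_switch(), where watermark stands in for
EXT4_FREEBLOCKS_WATERMARK (defined outside this diff):

	#include <stdio.h>

	static int nonda_switch(long long free_blocks, long long dirty_blocks,
				long long watermark)
	{
		/* fall back when free < 150% of dirty, or when free
		 * drops below dirty plus the watermark */
		return 2 * free_blocks < 3 * dirty_blocks ||
		       free_blocks < dirty_blocks + watermark;
	}

	int main(void)
	{
		printf("%d\n", nonda_switch(100000, 1000, 1024));	/* 0: stay delalloc */
		printf("%d\n", nonda_switch(1400, 1000, 1024));	/* 1: 1400 < 1500 */
		printf("%d\n", nonda_switch(1900, 1000, 1024));	/* 1: below dirty+watermark */
		printf("%d\n", nonda_switch(3000, 1000, 1024));	/* 0: comfortably above both */
		return 0;
	}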
+
 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata)
@@ -2397,6 +2527,12 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;
 
+       if (ext4_nonda_switch(inode->i_sb)) {
+               *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
+               return ext4_write_begin(file, mapping, pos,
+                                       len, flags, pagep, fsdata);
+       }
+       *fsdata = (void *)0;
 retry:
        /*
         * With delayed allocation, we don't log the i_disksize update
@@ -2447,7 +2583,7 @@ static int ext4_da_should_update_i_disksize(struct page *page,
        bh = page_buffers(page);
        idx = offset >> inode->i_blkbits;
 
-       for (i=0; i < idx; i++)
+       for (i = 0; i < idx; i++)
                bh = bh->b_this_page;
 
        if (!buffer_mapped(bh) || (buffer_delay(bh)))
@@ -2465,9 +2601,22 @@ static int ext4_da_write_end(struct file *file,
        handle_t *handle = ext4_journal_current_handle();
        loff_t new_i_size;
        unsigned long start, end;
+       int write_mode = (int)(unsigned long)fsdata;
+
+       if (write_mode == FALL_BACK_TO_NONDELALLOC) {
+               if (ext4_should_order_data(inode)) {
+                       return ext4_ordered_write_end(file, mapping, pos,
+                                       len, copied, page, fsdata);
+               } else if (ext4_should_writeback_data(inode)) {
+                       return ext4_writeback_write_end(file, mapping, pos,
+                                       len, copied, page, fsdata);
+               } else {
+                       BUG();
+               }
+       }
 
        start = pos & (PAGE_CACHE_SIZE - 1);
-       end = start + copied -1;
+       end = start + copied - 1;
 
        /*
         * generic_write_end() will run mark_inode_dirty() if i_size
@@ -2582,7 +2731,7 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
                        return 0;
        }
 
-       return generic_block_bmap(mapping,block,ext4_get_block);
+       return generic_block_bmap(mapping, block, ext4_get_block);
 }
 
 static int bget_one(handle_t *handle, struct buffer_head *bh)
@@ -3188,7 +3337,7 @@ static Indirect *ext4_find_shared(struct inode *inode, int depth,
        if (!partial->key && *partial->p)
                /* Writer: end */
                goto no_top;
-       for (p=partial; p>chain && all_zeroes((__le32*)p->bh->b_data,p->p); p--)
+       for (p = partial; (p > chain) && all_zeroes((__le32 *) p->bh->b_data, p->p); p--)
                ;
        /*
         * OK, we've found the last block that must survive. The rest of our
@@ -3207,7 +3356,7 @@ static Indirect *ext4_find_shared(struct inode *inode, int depth,
        }
        /* Writer: end */
 
-       while(partial > p) {
+       while (partial > p) {
                brelse(partial->bh);
                partial--;
        }
@@ -3399,9 +3548,9 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
                        /* This zaps the entire block.  Bottom up. */
                        BUFFER_TRACE(bh, "free child branches");
                        ext4_free_branches(handle, inode, bh,
-                                          (__le32*)bh->b_data,
-                                          (__le32*)bh->b_data + addr_per_block,
-                                          depth);
+                                       (__le32 *) bh->b_data,
+                                       (__le32 *) bh->b_data + addr_per_block,
+                                       depth);
 
                        /*
                         * We've probably journalled the indirect block several
@@ -3918,7 +4067,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
        inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
-       if(!(test_opt (inode->i_sb, NO_UID32))) {
+       if (!(test_opt(inode->i_sb, NO_UID32))) {
                inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
                inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
        }
@@ -3936,7 +4085,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                if (inode->i_mode == 0 ||
                    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
                        /* this inode is deleted */
-                       brelse (bh);
+                       brelse(bh);
                        ret = -ESTALE;
                        goto bad_inode;
                }
@@ -3969,7 +4118,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
                if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
                    EXT4_INODE_SIZE(inode->i_sb)) {
-                       brelse (bh);
+                       brelse(bh);
                        ret = -EIO;
                        goto bad_inode;
                }
@@ -4022,7 +4171,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                        init_special_inode(inode, inode->i_mode,
                           new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
        }
-       brelse (iloc.bh);
+       brelse(iloc.bh);
        ext4_set_inode_flags(inode);
        unlock_new_inode(inode);
        return inode;
@@ -4104,14 +4253,14 @@ static int ext4_do_update_inode(handle_t *handle,
 
        ext4_get_inode_flags(ei);
        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
-       if(!(test_opt(inode->i_sb, NO_UID32))) {
+       if (!(test_opt(inode->i_sb, NO_UID32))) {
                raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
                raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
 /*
  * Fix up interoperability with old kernels. Otherwise, old inodes get
  * re-used with the upper 16 bits of the uid/gid intact
  */
-               if(!ei->i_dtime) {
+               if (!ei->i_dtime) {
                        raw_inode->i_uid_high =
                                cpu_to_le16(high_16_bits(inode->i_uid));
                        raw_inode->i_gid_high =
@@ -4199,7 +4348,7 @@ static int ext4_do_update_inode(handle_t *handle,
        ei->i_state &= ~EXT4_STATE_NEW;
 
 out_brelse:
-       brelse (bh);
+       brelse(bh);
        ext4_std_error(inode->i_sb, err);
        return err;
 }
@@ -4802,6 +4951,7 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
        loff_t size;
        unsigned long len;
        int ret = -EINVAL;
+       void *fsdata;
        struct file *file = vma->vm_file;
        struct inode *inode = file->f_path.dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;
@@ -4840,11 +4990,11 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
         * on the same page though
         */
        ret = mapping->a_ops->write_begin(file, mapping, page_offset(page),
-                       len, AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
+                       len, AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
        if (ret < 0)
                goto out_unlock;
        ret = mapping->a_ops->write_end(file, mapping, page_offset(page),
-                       len, len, page, NULL);
+                       len, len, page, fsdata);
        if (ret < 0)
                goto out_unlock;
        ret = 0;