ext4: Convert the usage of NR_CPUS to nr_cpu_ids.
index 816ba8cce79a84b108af24cb86257f3740179ad2..49bec8404c5fe122974ee68363c6faa588b2d9f4 100644 (file)
@@ -787,13 +787,16 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
                if (bh_uptodate_or_lock(bh[i]))
                        continue;
 
+               spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
                if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                        ext4_init_block_bitmap(sb, bh[i],
                                                first_group + i, desc);
                        set_buffer_uptodate(bh[i]);
                        unlock_buffer(bh[i]);
+                       spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
                        continue;
                }
+               spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
                get_bh(bh[i]);
                bh[i]->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh[i]);
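The sb_bgl_lock()/spin_unlock() pair added above makes the EXT4_BG_BLOCK_UNINIT test and the on-the-fly bitmap initialization atomic with respect to other updaters of the same block group. A loose, runnable userspace analogy of the pattern, with a pthread mutex standing in for the per-group spinlock and a counter standing in for ext4_init_block_bitmap():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;
static int uninit = 1;          /* models EXT4_BG_BLOCK_UNINIT */
static int init_count;          /* how many times we "initialized" */

static void *reader(void *arg)
{
        (void)arg;
        /* Test and initialize under one lock, as in the hunk above. */
        pthread_mutex_lock(&group_lock);
        if (uninit) {
                init_count++;   /* models ext4_init_block_bitmap() */
                uninit = 0;
        }
        pthread_mutex_unlock(&group_lock);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, reader, NULL);
        pthread_create(&b, NULL, reader, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("bitmap initialized %d time(s)\n", init_count); /* always 1 */
        return 0;
}

Without a lock covering both the flag test and the initialization, two CPUs could each observe the uninitialized state and rebuild the bitmap concurrently.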
@@ -2236,21 +2239,192 @@ ext4_mb_store_history(struct ext4_allocation_context *ac)
 #define ext4_mb_history_init(sb)
 #endif
 
+
+/* Create and initialize ext4_group_info data for the given group. */
+int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
+                         struct ext4_group_desc *desc)
+{
+       int i, len;
+       int metalen = 0;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_group_info **meta_group_info;
+
+       /*
+        * First check whether this group is the first of a descriptor
+        * block. If so, we have to allocate a new table of pointers
+        * to ext4_group_info structures
+        */
+       if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
+               metalen = sizeof(*meta_group_info) <<
+                       EXT4_DESC_PER_BLOCK_BITS(sb);
+               meta_group_info = kmalloc(metalen, GFP_KERNEL);
+               if (meta_group_info == NULL) {
+                       printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
+                              "buddy group\n");
+                       goto exit_meta_group_info;
+               }
+               sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
+                       meta_group_info;
+       }
+
+       /*
+        * Calculate the needed size. If the bb_counters size changes,
+        * don't forget about ext4_mb_generate_buddy()
+        */
+       len = offsetof(typeof(**meta_group_info),
+                      bb_counters[sb->s_blocksize_bits + 2]);
+
+       meta_group_info =
+               sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
+       i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
+
+       meta_group_info[i] = kzalloc(len, GFP_KERNEL);
+       if (meta_group_info[i] == NULL) {
+               printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
+               goto exit_group_info;
+       }
+       set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
+               &(meta_group_info[i]->bb_state));
+
+       /*
+        * Initialize bb_free so that empty groups can be skipped
+        * without initialization
+        */
+       if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+               meta_group_info[i]->bb_free =
+                       ext4_free_blocks_after_init(sb, group, desc);
+       } else {
+               meta_group_info[i]->bb_free =
+                       le16_to_cpu(desc->bg_free_blocks_count);
+       }
+
+       INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
+
+#ifdef DOUBLE_CHECK
+       {
+               struct buffer_head *bh;
+               meta_group_info[i]->bb_bitmap =
+                       kmalloc(sb->s_blocksize, GFP_KERNEL);
+               BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
+               bh = ext4_read_block_bitmap(sb, group);
+               BUG_ON(bh == NULL);
+               memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
+                       sb->s_blocksize);
+               put_bh(bh);
+       }
+#endif
+
+       return 0;
+
+exit_group_info:
+       /* If a meta_group_info table has been allocated, release it now */
+       if (group % EXT4_DESC_PER_BLOCK(sb) == 0)
+               kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
+exit_meta_group_info:
+       return -ENOMEM;
+} /* ext4_mb_add_groupinfo */
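ext4_mb_add_groupinfo reaches a group's ext4_group_info through a two-level table: the high bits of the group number select the per-descriptor-block pointer table in s_group_info, the low bits select the slot inside it. A self-contained sketch of the index arithmetic, assuming hypothetical 4KB blocks and 32-byte group descriptors (so 128 descriptors per block):

#include <stdio.h>

/* Hypothetical values for illustration: 4096 / 32 = 128 descriptors
 * per block, i.e. EXT4_DESC_PER_BLOCK_BITS == 7. */
#define DESC_PER_BLOCK_BITS 7
#define DESC_PER_BLOCK      (1 << DESC_PER_BLOCK_BITS)

int main(void)
{
        unsigned long group = 300;

        /* first index: which meta_group_info table */
        unsigned long meta = group >> DESC_PER_BLOCK_BITS;
        /* second index: slot within that table */
        unsigned long slot = group & (DESC_PER_BLOCK - 1);

        /* prints: group 300 -> s_group_info[2][44] */
        printf("group %lu -> s_group_info[%lu][%lu]\n", group, meta, slot);
        return 0;
}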
+
+/*
+ * Add a group to the existing groups.
+ * This function is used for online resize
+ */
+int ext4_mb_add_more_groupinfo(struct super_block *sb, ext4_group_t group,
+                              struct ext4_group_desc *desc)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct inode *inode = sbi->s_buddy_cache;
+       int blocks_per_page;
+       int block;
+       int pnum;
+       struct page *page;
+       int err;
+
+       /* Add group based on group descriptor */
+       err = ext4_mb_add_groupinfo(sb, group, desc);
+       if (err)
+               return err;
+
+       /*
+        * Cached pages containing the dynamic mb_alloc data (buddy and
+        * bitmap data) are marked not up to date so that they will be
+        * re-initialized during the next call to ext4_mb_load_buddy
+        */
+
+       /* Set buddy page as not up to date */
+       blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+       block = group * 2;
+       pnum = block / blocks_per_page;
+       page = find_get_page(inode->i_mapping, pnum);
+       if (page != NULL) {
+               ClearPageUptodate(page);
+               page_cache_release(page);
+       }
+
+       /* Set bitmap page as not up to date */
+       block++;
+       pnum = block / blocks_per_page;
+       page = find_get_page(inode->i_mapping, pnum);
+       if (page != NULL) {
+               ClearPageUptodate(page);
+               page_cache_release(page);
+       }
+
+       return 0;
+}
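The page invalidation above relies on the buddy-cache layout: group g owns logical blocks 2*g (buddy data) and 2*g + 1 (bitmap data) in the s_buddy_cache inode, and a block's page number is simply block / blocks_per_page. A runnable sketch of the mapping, with illustrative 4KB pages and 4KB blocks:

#include <stdio.h>

int main(void)
{
        /* Illustrative sizes: one buddy-cache block per page. */
        const unsigned long page_size = 4096, block_size = 4096;
        unsigned long blocks_per_page = page_size / block_size;  /* 1 */
        unsigned long group = 10;

        unsigned long buddy_block  = group * 2;        /* 20 */
        unsigned long bitmap_block = group * 2 + 1;    /* 21 */

        printf("buddy page %lu, bitmap page %lu\n",
               buddy_block / blocks_per_page,          /* 20 */
               bitmap_block / blocks_per_page);        /* 21 */
        return 0;
}

With blocks smaller than a page, both blocks can land in the same page, which is why the code looks up each page independently rather than assuming two distinct pages.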
+
+/*
+ * Update an existing group.
+ * This function is used for online resize
+ */
+void ext4_mb_update_group_info(struct ext4_group_info *grp, ext4_grpblk_t add)
+{
+       grp->bb_free += add;
+}
+
 static int ext4_mb_init_backend(struct super_block *sb)
 {
        ext4_group_t i;
-       int j, len, metalen;
+       int metalen;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
-       int num_meta_group_infos =
-               (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) >>
-                       EXT4_DESC_PER_BLOCK_BITS(sb);
+       struct ext4_super_block *es = sbi->s_es;
+       int num_meta_group_infos;
+       int num_meta_group_infos_max;
+       int array_size;
        struct ext4_group_info **meta_group_info;
+       struct ext4_group_desc *desc;
+
+       /* This is the number of blocks used by GDT */
+       num_meta_group_infos = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) -
+                               1) >> EXT4_DESC_PER_BLOCK_BITS(sb);
+
+       /*
+        * This is the total number of blocks used by GDT, including
+        * the blocks reserved for future GDT growth.
+        * The s_group_info array is allocated with this value
+        * to allow a clean online resize without complex
+        * pointer manipulation.
+        * The drawback is the memory left unused when no resize
+        * occurs, but it is very small in terms of pages
+        * (see comments below).
+        * This needs to be handled properly when META_BG resizing
+        * is allowed.
+        */
+       num_meta_group_infos_max = num_meta_group_infos +
+                               le16_to_cpu(es->s_reserved_gdt_blocks);
 
+       /*
+        * array_size is the size of the s_group_info array. We round it
+        * up to the next power of two because kmalloc rounds allocations
+        * up this way internally, so the extra memory comes for free
+        * here (e.g. it may be used for META_BG resize).
+        */
+       array_size = 1;
+       while (array_size < sizeof(*sbi->s_group_info) *
+              num_meta_group_infos_max)
+               array_size = array_size << 1;
        /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
         * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
         * So a two level scheme suffices for now. */
-       sbi->s_group_info = kmalloc(sizeof(*sbi->s_group_info) *
-                                   num_meta_group_infos, GFP_KERNEL);
+       sbi->s_group_info = kmalloc(array_size, GFP_KERNEL);
        if (sbi->s_group_info == NULL) {
                printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
                return -ENOMEM;
@@ -2277,62 +2451,15 @@ static int ext4_mb_init_backend(struct super_block *sb)
                sbi->s_group_info[i] = meta_group_info;
        }
 
-       /*
-        * calculate needed size. if change bb_counters size,
-        * don't forget about ext4_mb_generate_buddy()
-        */
-       len = sizeof(struct ext4_group_info);
-       len += sizeof(unsigned short) * (sb->s_blocksize_bits + 2);
        for (i = 0; i < sbi->s_groups_count; i++) {
-               struct ext4_group_desc *desc;
-
-               meta_group_info =
-                       sbi->s_group_info[i >> EXT4_DESC_PER_BLOCK_BITS(sb)];
-               j = i & (EXT4_DESC_PER_BLOCK(sb) - 1);
-
-               meta_group_info[j] = kzalloc(len, GFP_KERNEL);
-               if (meta_group_info[j] == NULL) {
-                       printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
-                       goto err_freebuddy;
-               }
                desc = ext4_get_group_desc(sb, i, NULL);
                if (desc == NULL) {
                        printk(KERN_ERR
                                "EXT4-fs: can't read descriptor %lu\n", i);
-                       i++;
                        goto err_freebuddy;
                }
-               set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
-                       &(meta_group_info[j]->bb_state));
-
-               /*
-                * initialize bb_free to be able to skip
-                * empty groups without initialization
-                */
-               if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
-                       meta_group_info[j]->bb_free =
-                               ext4_free_blocks_after_init(sb, i, desc);
-               } else {
-                       meta_group_info[j]->bb_free =
-                               le16_to_cpu(desc->bg_free_blocks_count);
-               }
-
-               INIT_LIST_HEAD(&meta_group_info[j]->bb_prealloc_list);
-
-#ifdef DOUBLE_CHECK
-               {
-                       struct buffer_head *bh;
-                       meta_group_info[j]->bb_bitmap =
-                               kmalloc(sb->s_blocksize, GFP_KERNEL);
-                       BUG_ON(meta_group_info[j]->bb_bitmap == NULL);
-                       bh = ext4_read_block_bitmap(sb, i);
-                       BUG_ON(bh == NULL);
-                       memcpy(meta_group_info[j]->bb_bitmap, bh->b_data,
-                                       sb->s_blocksize);
-                       put_bh(bh);
-               }
-#endif
-
+               if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
+                       goto err_freebuddy;
        }
 
        return 0;
@@ -2413,7 +2540,7 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
        sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
        sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
 
-       i = sizeof(struct ext4_locality_group) * NR_CPUS;
+       i = sizeof(struct ext4_locality_group) * nr_cpu_ids;
        sbi->s_locality_groups = kmalloc(i, GFP_KERNEL);
        if (sbi->s_locality_groups == NULL) {
                clear_opt(sbi->s_mount_opt, MBALLOC);
@@ -2421,7 +2548,7 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
                kfree(sbi->s_mb_maxs);
                return -ENOMEM;
        }
-       for (i = 0; i < NR_CPUS; i++) {
+       for (i = 0; i < nr_cpu_ids; i++) {
                struct ext4_locality_group *lg;
                lg = &sbi->s_locality_groups[i];
                mutex_init(&lg->lg_mutex);
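nr_cpu_ids is the number of CPU ids the booted system can actually use, which is usually far below the compile-time NR_CPUS ceiling, so sizing the locality-group array by it stops distro kernels built with a large NR_CPUS from wasting memory. (Later kernels converted this allocation to alloc_percpu().) An illustration with hypothetical figures:

#include <stdio.h>

int main(void)
{
        /* Illustrative: a kernel built with NR_CPUS=4096 booted on an
         * 8-CPU machine, with a made-up 64-byte locality group. */
        const unsigned long nr_cpus = 4096, nr_cpu_ids = 8;
        const unsigned long lg_size = 64;

        printf("NR_CPUS sizing:    %lu bytes\n", lg_size * nr_cpus);    /* 262144 */
        printf("nr_cpu_ids sizing: %lu bytes\n", lg_size * nr_cpu_ids); /*    512 */
        return 0;
}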
@@ -2840,7 +2967,15 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
        le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len);
        gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
        spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
-       percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
+
+       /*
+        * The free blocks count has already been reduced/reserved
+        * at write_begin() time for delayed allocation, so
+        * do not account for it twice
+        */
+       if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
+               percpu_counter_sub(&sbi->s_freeblocks_counter,
+                                       ac->ac_b_ex.fe_len);
 
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group = ext4_flex_group(sbi,
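With delayed allocation the free-block count is charged once, when the write is reserved at write_begin() time, and must not be charged again when blocks are finally chosen at writeback. A runnable sketch of that rule; DA_RESERVED and both helpers are hypothetical stand-ins, not ext4 API:

#include <stdio.h>

#define DA_RESERVED 0x1         /* models EXT4_MB_DELALLOC_RESERVED */

static long free_blocks = 1000;

static void write_begin(long len)
{
        free_blocks -= len;             /* delalloc reserves up front */
}

static void mark_diskspace_used(long len, int flags)
{
        if (!(flags & DA_RESERVED))     /* already reserved: skip */
                free_blocks -= len;
}

int main(void)
{
        write_begin(10);                        /* reserve at write time */
        mark_diskspace_used(10, DA_RESERVED);   /* allocate at writeback */
        printf("free_blocks = %ld\n", free_blocks); /* 990, not 980 */
        return 0;
}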
@@ -3601,20 +3736,23 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
 
        bitmap_bh = ext4_read_block_bitmap(sb, group);
        if (bitmap_bh == NULL) {
-               /* error handling here */
-               ext4_mb_release_desc(&e4b);
-               BUG_ON(bitmap_bh == NULL);
+               ext4_error(sb, __func__, "Error in reading block "
+                               "bitmap for %lu\n", group);
+               return 0;
        }
 
        err = ext4_mb_load_buddy(sb, group, &e4b);
-       BUG_ON(err != 0); /* error handling here */
+       if (err) {
+               ext4_error(sb, __func__, "Error in loading buddy "
+                               "information for %lu\n", group);
+               put_bh(bitmap_bh);
+               return 0;
+       }
 
        if (needed == 0)
                needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;
 
-       grp = ext4_get_group_info(sb, group);
        INIT_LIST_HEAD(&list);
-
        ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
 repeat:
        ext4_lock_group(sb, group);
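This hunk and the similar ones below replace BUG_ON() with ext4_error() plus a graceful skip: a corrupt group now degrades service for that group instead of taking down the whole kernel. A userspace sketch of the report-and-continue pattern:

#include <stdio.h>

/* Pretend group 2 is corrupt; stands in for a failed
 * ext4_mb_load_buddy() or block bitmap read. */
static int load_group(unsigned long group)
{
        return group == 2 ? -1 : 0;
}

int main(void)
{
        unsigned long group;

        for (group = 0; group < 4; group++) {
                if (load_group(group) != 0) {
                        fprintf(stderr,
                                "error loading buddy info for group %lu\n",
                                group);
                        continue;       /* skip it, keep the rest alive */
                }
                printf("group %lu processed\n", group);
        }
        return 0;
}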
@@ -3771,13 +3909,18 @@ repeat:
                ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
 
                err = ext4_mb_load_buddy(sb, group, &e4b);
-               BUG_ON(err != 0); /* error handling here */
+               if (err) {
+                       ext4_error(sb, __func__, "Error in loading buddy "
+                                       "information for %lu\n", group);
+                       continue;
+               }
 
                bitmap_bh = ext4_read_block_bitmap(sb, group);
                if (bitmap_bh == NULL) {
-                       /* error handling here */
+                       ext4_error(sb, __func__, "Error in reading block "
+                                       "bitmap for %lu\n", group);
                        ext4_mb_release_desc(&e4b);
-                       BUG_ON(bitmap_bh == NULL);
+                       continue;
                }
 
                ext4_lock_group(sb, group);
@@ -4045,6 +4188,17 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
                                            &(ar->len), errp);
                return block;
        }
+       if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) {
+               /*
+                * With delalloc we already reserved the blocks
+                */
+               ar->len = ext4_has_free_blocks(sbi, ar->len);
+       }
+
+       if (ar->len == 0) {
+               *errp = -ENOSPC;
+               return 0;
+       }
 
        while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
                ar->flags |= EXT4_MB_HINT_NOPREALLOC;
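For non-delalloc writers, ext4_mb_new_blocks now clamps the request against the free-block count up front and fails with ENOSPC when nothing can be granted; delalloc writers skip the check because their reservation already succeeded at write_begin() time. A sketch of the clamp, where has_free_blocks() is a hypothetical stand-in for ext4_has_free_blocks():

#include <stdio.h>

/* Grant at most what is actually free. */
static unsigned long has_free_blocks(unsigned long free, unsigned long want)
{
        return want < free ? want : free;
}

int main(void)
{
        unsigned long free = 3, want = 8;
        unsigned long len = has_free_blocks(free, want);

        if (len == 0) {
                fprintf(stderr, "ENOSPC\n");
                return 1;
        }
        printf("request clamped from %lu to %lu blocks\n", want, len);
        return 0;
}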
@@ -4056,6 +4210,9 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
        }
        inquota = ar->len;
 
+       if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+               ar->flags |= EXT4_MB_DELALLOC_RESERVED;
+
        ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
        if (!ac) {
                ar->len = 0;
@@ -4073,7 +4230,6 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
 
        ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
        if (!ext4_mb_use_preallocated(ac)) {
-
                ac->ac_op = EXT4_MB_HISTORY_ALLOC;
                ext4_mb_normalize_request(ac, ar);
 repeat:
@@ -4275,11 +4431,15 @@ do_more:
                count -= overflow;
        }
        bitmap_bh = ext4_read_block_bitmap(sb, block_group);
-       if (!bitmap_bh)
+       if (!bitmap_bh) {
+               err = -EIO;
                goto error_return;
+       }
        gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
-       if (!gdp)
+       if (!gdp) {
+               err = -EIO;
                goto error_return;
+       }
 
        if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
            in_range(ext4_inode_bitmap(sb, gdp), block, count) ||