2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
26 #include "print-tree.h"
27 #include "transaction.h"
30 #include "ref-cache.h"
32 #define BLOCK_GROUP_DATA EXTENT_WRITEBACK
33 #define BLOCK_GROUP_METADATA EXTENT_UPTODATE
34 #define BLOCK_GROUP_SYSTEM EXTENT_NEW
36 #define BLOCK_GROUP_DIRTY EXTENT_DIRTY
38 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
39 btrfs_root *extent_root);
40 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
41 btrfs_root *extent_root);
42 static struct btrfs_block_group_cache *
43 __btrfs_find_block_group(struct btrfs_root *root,
44 struct btrfs_block_group_cache *hint,
45 u64 search_start, int data, int owner);
47 void maybe_lock_mutex(struct btrfs_root *root)
49 if (root != root->fs_info->extent_root &&
50 root != root->fs_info->chunk_root &&
51 root != root->fs_info->dev_root) {
52 mutex_lock(&root->fs_info->alloc_mutex);
56 void maybe_unlock_mutex(struct btrfs_root *root)
58 if (root != root->fs_info->extent_root &&
59 root != root->fs_info->chunk_root &&
60 root != root->fs_info->dev_root) {
61 mutex_unlock(&root->fs_info->alloc_mutex);
65 static int cache_block_group(struct btrfs_root *root,
66 struct btrfs_block_group_cache *block_group)
68 struct btrfs_path *path;
71 struct extent_buffer *leaf;
72 struct extent_io_tree *free_space_cache;
82 root = root->fs_info->extent_root;
83 free_space_cache = &root->fs_info->free_space_cache;
85 if (block_group->cached)
88 path = btrfs_alloc_path();
94 * we get into deadlocks with paths held by callers of this function.
95 * since the alloc_mutex is protecting things right now, just
96 * skip the locking here
98 path->skip_locking = 1;
99 first_free = block_group->key.objectid;
100 key.objectid = block_group->key.objectid;
102 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
103 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
106 ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
110 leaf = path->nodes[0];
111 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
112 if (key.objectid + key.offset > first_free)
113 first_free = key.objectid + key.offset;
116 leaf = path->nodes[0];
117 slot = path->slots[0];
118 if (slot >= btrfs_header_nritems(leaf)) {
119 ret = btrfs_next_leaf(root, path);
128 btrfs_item_key_to_cpu(leaf, &key, slot);
129 if (key.objectid < block_group->key.objectid) {
132 if (key.objectid >= block_group->key.objectid +
133 block_group->key.offset) {
137 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
142 if (key.objectid > last) {
143 hole_size = key.objectid - last;
144 set_extent_dirty(free_space_cache, last,
145 last + hole_size - 1,
148 last = key.objectid + key.offset;
156 if (block_group->key.objectid +
157 block_group->key.offset > last) {
158 hole_size = block_group->key.objectid +
159 block_group->key.offset - last;
160 set_extent_dirty(free_space_cache, last,
161 last + hole_size - 1, GFP_NOFS);
163 block_group->cached = 1;
165 btrfs_free_path(path);
169 struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
173 struct extent_io_tree *block_group_cache;
174 struct btrfs_block_group_cache *block_group = NULL;
180 bytenr = max_t(u64, bytenr,
181 BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
182 block_group_cache = &info->block_group_cache;
183 ret = find_first_extent_bit(block_group_cache,
184 bytenr, &start, &end,
185 BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
190 ret = get_state_private(block_group_cache, start, &ptr);
194 block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
198 struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
202 struct extent_io_tree *block_group_cache;
203 struct btrfs_block_group_cache *block_group = NULL;
209 bytenr = max_t(u64, bytenr,
210 BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
211 block_group_cache = &info->block_group_cache;
212 ret = find_first_extent_bit(block_group_cache,
213 bytenr, &start, &end,
214 BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
219 ret = get_state_private(block_group_cache, start, &ptr);
223 block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
224 if (block_group->key.objectid <= bytenr && bytenr <
225 block_group->key.objectid + block_group->key.offset)
230 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
232 return (cache->flags & bits) == bits;
235 static int noinline find_search_start(struct btrfs_root *root,
236 struct btrfs_block_group_cache **cache_ret,
237 u64 *start_ret, u64 num, int data)
240 struct btrfs_block_group_cache *cache = *cache_ret;
241 struct extent_io_tree *free_space_cache;
242 struct extent_state *state;
247 u64 search_start = *start_ret;
250 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
251 total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
252 free_space_cache = &root->fs_info->free_space_cache;
258 ret = cache_block_group(root, cache);
263 last = max(search_start, cache->key.objectid);
264 if (!block_group_bits(cache, data) || cache->ro)
267 spin_lock_irq(&free_space_cache->lock);
268 state = find_first_extent_bit_state(free_space_cache, last, EXTENT_DIRTY);
273 spin_unlock_irq(&free_space_cache->lock);
277 start = max(last, state->start);
278 last = state->end + 1;
279 if (last - start < num) {
281 state = extent_state_next(state);
282 } while(state && !(state->state & EXTENT_DIRTY));
285 spin_unlock_irq(&free_space_cache->lock);
289 if (start + num > cache->key.objectid + cache->key.offset)
291 if (!block_group_bits(cache, data)) {
292 printk("block group bits don't match %Lu %d\n", cache->flags, data);
298 cache = btrfs_lookup_block_group(root->fs_info, search_start);
300 printk("Unable to find block group for %Lu\n", search_start);
306 last = cache->key.objectid + cache->key.offset;
308 cache = btrfs_lookup_first_block_group(root->fs_info, last);
309 if (!cache || cache->key.objectid >= total_fs_bytes) {
318 if (cache_miss && !cache->cached) {
319 cache_block_group(root, cache);
321 cache = btrfs_lookup_first_block_group(root->fs_info, last);
324 cache = btrfs_find_block_group(root, cache, last, data, 0);
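/* helper below scales num by factor tenths, i.e. roughly num * factor / 10
 * (a factor of 10 leaves num unchanged); used as a fullness threshold
 */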
331 static u64 div_factor(u64 num, int factor)
340 static int block_group_state_bits(u64 flags)
343 if (flags & BTRFS_BLOCK_GROUP_DATA)
344 bits |= BLOCK_GROUP_DATA;
345 if (flags & BTRFS_BLOCK_GROUP_METADATA)
346 bits |= BLOCK_GROUP_METADATA;
347 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
348 bits |= BLOCK_GROUP_SYSTEM;
352 static struct btrfs_block_group_cache *
353 __btrfs_find_block_group(struct btrfs_root *root,
354 struct btrfs_block_group_cache *hint,
355 u64 search_start, int data, int owner)
357 struct btrfs_block_group_cache *cache;
358 struct extent_io_tree *block_group_cache;
359 struct btrfs_block_group_cache *found_group = NULL;
360 struct btrfs_fs_info *info = root->fs_info;
373 block_group_cache = &info->block_group_cache;
375 if (data & BTRFS_BLOCK_GROUP_METADATA)
378 bit = block_group_state_bits(data);
381 struct btrfs_block_group_cache *shint;
382 shint = btrfs_lookup_first_block_group(info, search_start);
383 if (shint && block_group_bits(shint, data) && !shint->ro) {
384 spin_lock(&shint->lock);
385 used = btrfs_block_group_used(&shint->item);
386 if (used + shint->pinned <
387 div_factor(shint->key.offset, factor)) {
388 spin_unlock(&shint->lock);
391 spin_unlock(&shint->lock);
394 if (hint && !hint->ro && block_group_bits(hint, data)) {
395 spin_lock(&hint->lock);
396 used = btrfs_block_group_used(&hint->item);
397 if (used + hint->pinned <
398 div_factor(hint->key.offset, factor)) {
399 spin_unlock(&hint->lock);
402 spin_unlock(&hint->lock);
403 last = hint->key.objectid + hint->key.offset;
406 last = max(hint->key.objectid, search_start);
412 ret = find_first_extent_bit(block_group_cache, last,
417 ret = get_state_private(block_group_cache, start, &ptr);
423 cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
424 spin_lock(&cache->lock);
425 last = cache->key.objectid + cache->key.offset;
426 used = btrfs_block_group_used(&cache->item);
428 if (!cache->ro && block_group_bits(cache, data)) {
429 free_check = div_factor(cache->key.offset, factor);
430 if (used + cache->pinned < free_check) {
432 spin_unlock(&cache->lock);
436 spin_unlock(&cache->lock);
444 if (!full_search && factor < 10) {
454 struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
455 struct btrfs_block_group_cache
456 *hint, u64 search_start,
460 struct btrfs_block_group_cache *ret;
461 ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
464 static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation,
465 u64 owner, u64 owner_offset)
467 u32 high_crc = ~(u32)0;
468 u32 low_crc = ~(u32)0;
470 lenum = cpu_to_le64(root_objectid);
471 high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
472 lenum = cpu_to_le64(ref_generation);
473 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
474 if (owner >= BTRFS_FIRST_FREE_OBJECTID) {
475 lenum = cpu_to_le64(owner);
476 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
477 lenum = cpu_to_le64(owner_offset);
478 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
480 return ((u64)high_crc << 32) | (u64)low_crc;
483 static int match_extent_ref(struct extent_buffer *leaf,
484 struct btrfs_extent_ref *disk_ref,
485 struct btrfs_extent_ref *cpu_ref)
490 if (cpu_ref->objectid)
491 len = sizeof(*cpu_ref);
493 len = 2 * sizeof(u64);
494 ret = memcmp_extent_buffer(leaf, cpu_ref, (unsigned long)disk_ref,
499 /* simple helper to search for an existing extent at a given offset */
500 int btrfs_lookup_extent(struct btrfs_root *root, struct btrfs_path *path,
504 struct btrfs_key key;
506 maybe_lock_mutex(root);
507 key.objectid = start;
509 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
510 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
512 maybe_unlock_mutex(root);
516 static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
517 struct btrfs_root *root,
518 struct btrfs_path *path, u64 bytenr,
520 u64 ref_generation, u64 owner,
521 u64 owner_offset, int del)
524 struct btrfs_key key;
525 struct btrfs_key found_key;
526 struct btrfs_extent_ref ref;
527 struct extent_buffer *leaf;
528 struct btrfs_extent_ref *disk_ref;
532 btrfs_set_stack_ref_root(&ref, root_objectid);
533 btrfs_set_stack_ref_generation(&ref, ref_generation);
534 btrfs_set_stack_ref_objectid(&ref, owner);
535 btrfs_set_stack_ref_offset(&ref, owner_offset);
537 hash = hash_extent_ref(root_objectid, ref_generation, owner,
540 key.objectid = bytenr;
541 key.type = BTRFS_EXTENT_REF_KEY;
544 ret = btrfs_search_slot(trans, root, &key, path,
548 leaf = path->nodes[0];
550 u32 nritems = btrfs_header_nritems(leaf);
551 if (path->slots[0] >= nritems) {
552 ret2 = btrfs_next_leaf(root, path);
555 leaf = path->nodes[0];
557 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
558 if (found_key.objectid != bytenr ||
559 found_key.type != BTRFS_EXTENT_REF_KEY)
561 key.offset = found_key.offset;
563 btrfs_release_path(root, path);
567 disk_ref = btrfs_item_ptr(path->nodes[0],
569 struct btrfs_extent_ref);
570 if (match_extent_ref(path->nodes[0], disk_ref, &ref)) {
574 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
575 key.offset = found_key.offset + 1;
576 btrfs_release_path(root, path);
583 * Back reference rules. Back refs have three main goals:
585 * 1) differentiate between all holders of references to an extent so that
586 * when a reference is dropped we can make sure it was a valid reference
587 * before freeing the extent.
589 * 2) Provide enough information to quickly find the holders of an extent
590 * if we notice a given block is corrupted or bad.
592 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
593 * maintenance. This is actually the same as #2, but with a slightly
594 * different use case.
596 * File extents can be referenced by:
598 * - multiple snapshots, subvolumes, or different generations in one subvol
599 * - different files inside a single subvolume (in theory, not implemented yet)
600 * - different offsets inside a file (bookend extents in file.c)
602 * The extent ref structure has fields for:
604 * - Objectid of the subvolume root
605 * - Generation number of the tree holding the reference
606 * - objectid of the file holding the reference
607 * - offset in the file corresponding to the key holding the reference
609 * When a file extent is allocated the fields are filled in:
610 * (root_key.objectid, trans->transid, inode objectid, offset in file)
612 * When a leaf is cow'd, new references are added for every file extent found
613 * in the leaf. It looks the same as the create case, but trans->transid
614 * will be different when the block is cow'd.
616 * (root_key.objectid, trans->transid, inode objectid, offset in file)
618 * When a file extent is removed either during snapshot deletion or file
619 * truncation, the corresponding back reference is found
622 * (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
623 * inode objectid, offset in file)
625 * Btree extents can be referenced by:
627 * - Different subvolumes
628 * - Different generations of the same subvolume
630 * Storing sufficient information for a full reverse mapping of a btree
631 * block would require storing the lowest key of the block in the backref,
632 * and it would require updating that lowest key either before write out or
633 * every time it changed. Instead, the objectid of the lowest key is stored
634 * along with the level of the tree block. This provides a hint
635 * about where in the btree the block can be found. Searches through the
636 * btree only need to look for a pointer to that block, so they stop one
637 * level higher than the level recorded in the backref.
639 * Some btrees do not do reference counting on their extents. These
640 * include the extent tree and the tree of tree roots. Backrefs for these
641 * trees always have a generation of zero.
643 * When a tree block is created, back references are inserted:
645 * (root->root_key.objectid, trans->transid or zero, level, lowest_key_objectid)
647 * When a tree block is cow'd in a reference counted root,
648 * new back references are added for all the blocks it points to.
649 * These are of the form (trans->transid will have increased since creation):
651 * (root->root_key.objectid, trans->transid, level, lowest_key_objectid)
653 * Because the lowest_key_objectid and the level are just hints,
654 * they are not used when backrefs are deleted. When a backref is deleted:
656 * if backref was for a tree root:
657 * root_objectid = root->root_key.objectid
659 * root_objectid = btrfs_header_owner(parent)
661 * (root_objectid, btrfs_header_generation(parent) or zero, 0, 0)
663 * Back Reference Key hashing:
665 * Back references have four fields, each 64 bits long. Unfortunately,
666 * they are hashed into a single 64 bit number and placed into the key offset.
667 * The key objectid corresponds to the first byte in the extent, and the
668 * key type is set to BTRFS_EXTENT_REF_KEY
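 *
 * As an illustrative sketch only (assembled from the rules above, not
 * copied from a caller in this file), the key for a file extent backref
 * on an extent starting at byte bytenr would be built along these lines,
 * reusing hash_extent_ref() defined earlier; root_objectid, ref_generation,
 * inode_objectid and file_offset are the four fields described above:
 *
 *	struct btrfs_key ref_key;
 *
 *	ref_key.objectid = bytenr;
 *	ref_key.type = BTRFS_EXTENT_REF_KEY;
 *	ref_key.offset = hash_extent_ref(root_objectid, ref_generation,
 *					 inode_objectid, file_offset);
 */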
670 int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans,
671 struct btrfs_root *root,
672 struct btrfs_path *path, u64 bytenr,
673 u64 root_objectid, u64 ref_generation,
674 u64 owner, u64 owner_offset)
677 struct btrfs_key key;
678 struct btrfs_extent_ref ref;
679 struct btrfs_extent_ref *disk_ref;
682 btrfs_set_stack_ref_root(&ref, root_objectid);
683 btrfs_set_stack_ref_generation(&ref, ref_generation);
684 btrfs_set_stack_ref_objectid(&ref, owner);
685 btrfs_set_stack_ref_offset(&ref, owner_offset);
687 hash = hash_extent_ref(root_objectid, ref_generation, owner,
690 key.objectid = bytenr;
691 key.type = BTRFS_EXTENT_REF_KEY;
693 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(ref));
694 while (ret == -EEXIST) {
695 disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
696 struct btrfs_extent_ref);
697 if (match_extent_ref(path->nodes[0], disk_ref, &ref))
700 btrfs_release_path(root, path);
701 ret = btrfs_insert_empty_item(trans, root, path, &key,
706 disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
707 struct btrfs_extent_ref);
708 write_extent_buffer(path->nodes[0], &ref, (unsigned long)disk_ref,
710 btrfs_mark_buffer_dirty(path->nodes[0]);
712 btrfs_release_path(root, path);
716 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
717 struct btrfs_root *root,
718 u64 bytenr, u64 num_bytes,
719 u64 root_objectid, u64 ref_generation,
720 u64 owner, u64 owner_offset)
722 struct btrfs_path *path;
724 struct btrfs_key key;
725 struct extent_buffer *l;
726 struct btrfs_extent_item *item;
729 WARN_ON(num_bytes < root->sectorsize);
730 path = btrfs_alloc_path();
735 key.objectid = bytenr;
736 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
737 key.offset = num_bytes;
738 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
747 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
748 refs = btrfs_extent_refs(l, item);
749 btrfs_set_extent_refs(l, item, refs + 1);
750 btrfs_mark_buffer_dirty(path->nodes[0]);
752 btrfs_release_path(root->fs_info->extent_root, path);
755 ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root,
756 path, bytenr, root_objectid,
757 ref_generation, owner, owner_offset);
759 finish_current_insert(trans, root->fs_info->extent_root);
760 del_pending_extents(trans, root->fs_info->extent_root);
762 btrfs_free_path(path);
766 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
767 struct btrfs_root *root,
768 u64 bytenr, u64 num_bytes,
769 u64 root_objectid, u64 ref_generation,
770 u64 owner, u64 owner_offset)
774 mutex_lock(&root->fs_info->alloc_mutex);
775 ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
776 root_objectid, ref_generation,
777 owner, owner_offset);
778 mutex_unlock(&root->fs_info->alloc_mutex);
782 int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
783 struct btrfs_root *root)
785 finish_current_insert(trans, root->fs_info->extent_root);
786 del_pending_extents(trans, root->fs_info->extent_root);
790 static int lookup_extent_ref(struct btrfs_trans_handle *trans,
791 struct btrfs_root *root, u64 bytenr,
792 u64 num_bytes, u32 *refs)
794 struct btrfs_path *path;
796 struct btrfs_key key;
797 struct extent_buffer *l;
798 struct btrfs_extent_item *item;
800 WARN_ON(num_bytes < root->sectorsize);
801 path = btrfs_alloc_path();
803 key.objectid = bytenr;
804 key.offset = num_bytes;
805 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
806 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
811 btrfs_print_leaf(root, path->nodes[0]);
812 printk("failed to find block number %Lu\n", bytenr);
816 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
817 *refs = btrfs_extent_refs(l, item);
819 btrfs_free_path(path);
824 static int get_reference_status(struct btrfs_root *root, u64 bytenr,
825 u64 parent_gen, u64 ref_objectid,
826 u64 *min_generation, u32 *ref_count)
828 struct btrfs_root *extent_root = root->fs_info->extent_root;
829 struct btrfs_path *path;
830 struct extent_buffer *leaf;
831 struct btrfs_extent_ref *ref_item;
832 struct btrfs_key key;
833 struct btrfs_key found_key;
834 u64 root_objectid = root->root_key.objectid;
839 key.objectid = bytenr;
841 key.type = BTRFS_EXTENT_ITEM_KEY;
843 path = btrfs_alloc_path();
844 mutex_lock(&root->fs_info->alloc_mutex);
845 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
850 leaf = path->nodes[0];
851 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
853 if (found_key.objectid != bytenr ||
854 found_key.type != BTRFS_EXTENT_ITEM_KEY) {
860 *min_generation = (u64)-1;
863 leaf = path->nodes[0];
864 nritems = btrfs_header_nritems(leaf);
865 if (path->slots[0] >= nritems) {
866 ret = btrfs_next_leaf(extent_root, path);
873 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
874 if (found_key.objectid != bytenr)
877 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
882 ref_item = btrfs_item_ptr(leaf, path->slots[0],
883 struct btrfs_extent_ref);
884 ref_generation = btrfs_ref_generation(leaf, ref_item);
886 * For (parent_gen > 0 && parent_gen > ref_gen):
888 * we reach here through the oldest root, therefore
889 * all other references from the same snapshot should have
890 * a larger generation.
892 if ((root_objectid != btrfs_ref_root(leaf, ref_item)) ||
893 (parent_gen > 0 && parent_gen > ref_generation) ||
894 (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
895 ref_objectid != btrfs_ref_objectid(leaf, ref_item))) {
902 if (*min_generation > ref_generation)
903 *min_generation = ref_generation;
909 mutex_unlock(&root->fs_info->alloc_mutex);
910 btrfs_free_path(path);
914 int btrfs_cross_ref_exists(struct btrfs_trans_handle *trans,
915 struct btrfs_root *root,
916 struct btrfs_key *key, u64 bytenr)
918 struct btrfs_root *old_root;
919 struct btrfs_path *path = NULL;
920 struct extent_buffer *eb;
921 struct btrfs_file_extent_item *item;
929 BUG_ON(trans == NULL);
930 BUG_ON(key->type != BTRFS_EXTENT_DATA_KEY);
931 ret = get_reference_status(root, bytenr, 0, key->objectid,
932 &min_generation, &ref_count);
939 old_root = root->dirty_root->root;
940 ref_generation = old_root->root_key.offset;
942 /* all references are created in the running transaction */
943 if (min_generation > ref_generation) {
948 path = btrfs_alloc_path();
954 path->skip_locking = 1;
955 /* if no item is found, the extent is referenced by another snapshot */
956 ret = btrfs_search_slot(NULL, old_root, key, path, 0, 0);
961 item = btrfs_item_ptr(eb, path->slots[0],
962 struct btrfs_file_extent_item);
963 if (btrfs_file_extent_type(eb, item) != BTRFS_FILE_EXTENT_REG ||
964 btrfs_file_extent_disk_bytenr(eb, item) != bytenr) {
969 for (level = BTRFS_MAX_LEVEL - 1; level >= -1; level--) {
971 eb = path->nodes[level];
974 extent_start = eb->start;
976 extent_start = bytenr;
978 ret = get_reference_status(root, extent_start, ref_generation,
979 0, &min_generation, &ref_count);
983 if (ref_count != 1) {
988 ref_generation = btrfs_header_generation(eb);
993 btrfs_free_path(path);
997 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
998 struct extent_buffer *buf, int cache_ref)
1002 struct btrfs_key key;
1003 struct btrfs_file_extent_item *fi;
1008 int nr_file_extents = 0;
1010 if (!root->ref_cows)
1013 level = btrfs_header_level(buf);
1014 nritems = btrfs_header_nritems(buf);
1015 for (i = 0; i < nritems; i++) {
1019 btrfs_item_key_to_cpu(buf, &key, i);
1020 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1022 fi = btrfs_item_ptr(buf, i,
1023 struct btrfs_file_extent_item);
1024 if (btrfs_file_extent_type(buf, fi) ==
1025 BTRFS_FILE_EXTENT_INLINE)
1027 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1028 if (disk_bytenr == 0)
1031 if (buf != root->commit_root)
1034 mutex_lock(&root->fs_info->alloc_mutex);
1035 ret = __btrfs_inc_extent_ref(trans, root, disk_bytenr,
1036 btrfs_file_extent_disk_num_bytes(buf, fi),
1037 root->root_key.objectid, trans->transid,
1038 key.objectid, key.offset);
1039 mutex_unlock(&root->fs_info->alloc_mutex);
1046 bytenr = btrfs_node_blockptr(buf, i);
1047 btrfs_node_key_to_cpu(buf, &key, i);
1049 mutex_lock(&root->fs_info->alloc_mutex);
1050 ret = __btrfs_inc_extent_ref(trans, root, bytenr,
1051 btrfs_level_size(root, level - 1),
1052 root->root_key.objectid,
1054 level - 1, key.objectid);
1055 mutex_unlock(&root->fs_info->alloc_mutex);
1063 /* cache the original leaf block's references */
1064 if (level == 0 && cache_ref && buf != root->commit_root) {
1065 struct btrfs_leaf_ref *ref;
1066 struct btrfs_extent_info *info;
1068 ref = btrfs_alloc_leaf_ref(root, nr_file_extents);
1074 ref->root_gen = root->root_key.offset;
1075 ref->bytenr = buf->start;
1076 ref->owner = btrfs_header_owner(buf);
1077 ref->generation = btrfs_header_generation(buf);
1078 ref->nritems = nr_file_extents;
1079 info = ref->extents;
1081 for (i = 0; nr_file_extents > 0 && i < nritems; i++) {
1083 btrfs_item_key_to_cpu(buf, &key, i);
1084 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1086 fi = btrfs_item_ptr(buf, i,
1087 struct btrfs_file_extent_item);
1088 if (btrfs_file_extent_type(buf, fi) ==
1089 BTRFS_FILE_EXTENT_INLINE)
1091 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1092 if (disk_bytenr == 0)
1095 info->bytenr = disk_bytenr;
1097 btrfs_file_extent_disk_num_bytes(buf, fi);
1098 info->objectid = key.objectid;
1099 info->offset = key.offset;
1103 BUG_ON(!root->ref_tree);
1104 ret = btrfs_add_leaf_ref(root, ref);
1106 btrfs_free_leaf_ref(root, ref);
1113 for (i = 0; i < faili; i++) {
1116 btrfs_item_key_to_cpu(buf, &key, i);
1117 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1119 fi = btrfs_item_ptr(buf, i,
1120 struct btrfs_file_extent_item);
1121 if (btrfs_file_extent_type(buf, fi) ==
1122 BTRFS_FILE_EXTENT_INLINE)
1124 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1125 if (disk_bytenr == 0)
1127 err = btrfs_free_extent(trans, root, disk_bytenr,
1128 btrfs_file_extent_disk_num_bytes(buf,
1132 bytenr = btrfs_node_blockptr(buf, i);
1133 err = btrfs_free_extent(trans, root, bytenr,
1134 btrfs_level_size(root, level - 1), 0);
1142 static int write_one_cache_group(struct btrfs_trans_handle *trans,
1143 struct btrfs_root *root,
1144 struct btrfs_path *path,
1145 struct btrfs_block_group_cache *cache)
1149 struct btrfs_root *extent_root = root->fs_info->extent_root;
1151 struct extent_buffer *leaf;
1153 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
1158 leaf = path->nodes[0];
1159 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
1160 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
1161 btrfs_mark_buffer_dirty(leaf);
1162 btrfs_release_path(extent_root, path);
1164 finish_current_insert(trans, extent_root);
1165 pending_ret = del_pending_extents(trans, extent_root);
1174 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
1175 struct btrfs_root *root)
1177 struct extent_io_tree *block_group_cache;
1178 struct btrfs_block_group_cache *cache;
1182 struct btrfs_path *path;
1188 block_group_cache = &root->fs_info->block_group_cache;
1189 path = btrfs_alloc_path();
1193 mutex_lock(&root->fs_info->alloc_mutex);
1195 ret = find_first_extent_bit(block_group_cache, last,
1196 &start, &end, BLOCK_GROUP_DIRTY);
1201 ret = get_state_private(block_group_cache, start, &ptr);
1204 cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
1205 err = write_one_cache_group(trans, root,
1208 * if we fail to write the cache group, we want
1209 * to keep it marked dirty in hopes that a later
1216 clear_extent_bits(block_group_cache, start, end,
1217 BLOCK_GROUP_DIRTY, GFP_NOFS);
1219 btrfs_free_path(path);
1220 mutex_unlock(&root->fs_info->alloc_mutex);
1224 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
1227 struct list_head *head = &info->space_info;
1228 struct list_head *cur;
1229 struct btrfs_space_info *found;
1230 list_for_each(cur, head) {
1231 found = list_entry(cur, struct btrfs_space_info, list);
1232 if (found->flags == flags)
1239 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
1240 u64 total_bytes, u64 bytes_used,
1241 struct btrfs_space_info **space_info)
1243 struct btrfs_space_info *found;
1245 found = __find_space_info(info, flags);
1247 found->total_bytes += total_bytes;
1248 found->bytes_used += bytes_used;
1250 *space_info = found;
1253 found = kmalloc(sizeof(*found), GFP_NOFS);
1257 list_add(&found->list, &info->space_info);
1258 found->flags = flags;
1259 found->total_bytes = total_bytes;
1260 found->bytes_used = bytes_used;
1261 found->bytes_pinned = 0;
1263 found->force_alloc = 0;
1264 *space_info = found;
1268 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1270 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
1271 BTRFS_BLOCK_GROUP_RAID1 |
1272 BTRFS_BLOCK_GROUP_RAID10 |
1273 BTRFS_BLOCK_GROUP_DUP);
1275 if (flags & BTRFS_BLOCK_GROUP_DATA)
1276 fs_info->avail_data_alloc_bits |= extra_flags;
1277 if (flags & BTRFS_BLOCK_GROUP_METADATA)
1278 fs_info->avail_metadata_alloc_bits |= extra_flags;
1279 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1280 fs_info->avail_system_alloc_bits |= extra_flags;
1284 static u64 reduce_alloc_profile(struct btrfs_root *root, u64 flags)
1286 u64 num_devices = root->fs_info->fs_devices->num_devices;
1288 if (num_devices == 1)
1289 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
1290 if (num_devices < 4)
1291 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
1293 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
1294 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
1295 BTRFS_BLOCK_GROUP_RAID10))) {
1296 flags &= ~BTRFS_BLOCK_GROUP_DUP;
1299 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
1300 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
1301 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
1304 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
1305 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
1306 (flags & BTRFS_BLOCK_GROUP_RAID10) |
1307 (flags & BTRFS_BLOCK_GROUP_DUP)))
1308 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
1312 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
1313 struct btrfs_root *extent_root, u64 alloc_bytes,
1314 u64 flags, int force)
1316 struct btrfs_space_info *space_info;
1322 flags = reduce_alloc_profile(extent_root, flags);
1324 space_info = __find_space_info(extent_root->fs_info, flags);
1326 ret = update_space_info(extent_root->fs_info, flags,
1330 BUG_ON(!space_info);
1332 if (space_info->force_alloc) {
1334 space_info->force_alloc = 0;
1336 if (space_info->full)
1339 thresh = div_factor(space_info->total_bytes, 6);
1341 (space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) <
1345 mutex_lock(&extent_root->fs_info->chunk_mutex);
1346 ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
1347 if (ret == -ENOSPC) {
1348 printk("space info full %Lu\n", flags);
1349 space_info->full = 1;
1354 ret = btrfs_make_block_group(trans, extent_root, 0, flags,
1355 BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
1358 mutex_unlock(&extent_root->fs_info->chunk_mutex);
1363 static int update_block_group(struct btrfs_trans_handle *trans,
1364 struct btrfs_root *root,
1365 u64 bytenr, u64 num_bytes, int alloc,
1368 struct btrfs_block_group_cache *cache;
1369 struct btrfs_fs_info *info = root->fs_info;
1370 u64 total = num_bytes;
1376 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
1378 cache = btrfs_lookup_block_group(info, bytenr);
1382 byte_in_group = bytenr - cache->key.objectid;
1383 WARN_ON(byte_in_group > cache->key.offset);
1384 start = cache->key.objectid;
1385 end = start + cache->key.offset - 1;
1386 set_extent_bits(&info->block_group_cache, start, end,
1387 BLOCK_GROUP_DIRTY, GFP_NOFS);
1389 spin_lock(&cache->lock);
1390 old_val = btrfs_block_group_used(&cache->item);
1391 num_bytes = min(total, cache->key.offset - byte_in_group);
1393 old_val += num_bytes;
1394 cache->space_info->bytes_used += num_bytes;
1395 btrfs_set_block_group_used(&cache->item, old_val);
1396 spin_unlock(&cache->lock);
1398 old_val -= num_bytes;
1399 cache->space_info->bytes_used -= num_bytes;
1400 btrfs_set_block_group_used(&cache->item, old_val);
1401 spin_unlock(&cache->lock);
1403 set_extent_dirty(&info->free_space_cache,
1404 bytenr, bytenr + num_bytes - 1,
1409 bytenr += num_bytes;
1414 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
1419 ret = find_first_extent_bit(&root->fs_info->block_group_cache,
1420 search_start, &start, &end,
1421 BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
1422 BLOCK_GROUP_SYSTEM);
1429 int btrfs_update_pinned_extents(struct btrfs_root *root,
1430 u64 bytenr, u64 num, int pin)
1433 struct btrfs_block_group_cache *cache;
1434 struct btrfs_fs_info *fs_info = root->fs_info;
1436 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
1438 set_extent_dirty(&fs_info->pinned_extents,
1439 bytenr, bytenr + num - 1, GFP_NOFS);
1441 clear_extent_dirty(&fs_info->pinned_extents,
1442 bytenr, bytenr + num - 1, GFP_NOFS);
1445 cache = btrfs_lookup_block_group(fs_info, bytenr);
1447 u64 first = first_logical_byte(root, bytenr);
1448 WARN_ON(first < bytenr);
1449 len = min(first - bytenr, num);
1451 len = min(num, cache->key.offset -
1452 (bytenr - cache->key.objectid));
1456 spin_lock(&cache->lock);
1457 cache->pinned += len;
1458 cache->space_info->bytes_pinned += len;
1459 spin_unlock(&cache->lock);
1461 fs_info->total_pinned += len;
1464 spin_lock(&cache->lock);
1465 cache->pinned -= len;
1466 cache->space_info->bytes_pinned -= len;
1467 spin_unlock(&cache->lock);
1469 fs_info->total_pinned -= len;
1477 int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
1482 struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
1486 ret = find_first_extent_bit(pinned_extents, last,
1487 &start, &end, EXTENT_DIRTY);
1490 set_extent_dirty(copy, start, end, GFP_NOFS);
1496 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
1497 struct btrfs_root *root,
1498 struct extent_io_tree *unpin)
1503 struct extent_io_tree *free_space_cache;
1504 free_space_cache = &root->fs_info->free_space_cache;
1506 mutex_lock(&root->fs_info->alloc_mutex);
1508 ret = find_first_extent_bit(unpin, 0, &start, &end,
1512 btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
1513 clear_extent_dirty(unpin, start, end, GFP_NOFS);
1514 set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
1515 if (need_resched()) {
1516 mutex_unlock(&root->fs_info->alloc_mutex);
1518 mutex_lock(&root->fs_info->alloc_mutex);
1521 mutex_unlock(&root->fs_info->alloc_mutex);
1525 static int finish_current_insert(struct btrfs_trans_handle *trans,
1526 struct btrfs_root *extent_root)
1530 struct btrfs_fs_info *info = extent_root->fs_info;
1531 struct extent_buffer *eb;
1532 struct btrfs_path *path;
1533 struct btrfs_key ins;
1534 struct btrfs_disk_key first;
1535 struct btrfs_extent_item extent_item;
1540 WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
1541 btrfs_set_stack_extent_refs(&extent_item, 1);
1542 btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
1543 path = btrfs_alloc_path();
1546 ret = find_first_extent_bit(&info->extent_ins, 0, &start,
1547 &end, EXTENT_LOCKED);
1551 ins.objectid = start;
1552 ins.offset = end + 1 - start;
1553 err = btrfs_insert_item(trans, extent_root, &ins,
1554 &extent_item, sizeof(extent_item));
1555 clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,
1558 eb = btrfs_find_create_tree_block(extent_root, ins.objectid,
1561 if (!btrfs_buffer_uptodate(eb, trans->transid))
1562 btrfs_read_buffer(eb, trans->transid);
1564 btrfs_tree_lock(eb);
1565 level = btrfs_header_level(eb);
1567 btrfs_item_key(eb, &first, 0);
1569 btrfs_node_key(eb, &first, 0);
1571 btrfs_tree_unlock(eb);
1572 free_extent_buffer(eb);
1574 * the first key is just a hint, so the race we've created
1575 * against reading it is fine
1577 err = btrfs_insert_extent_backref(trans, extent_root, path,
1578 start, extent_root->root_key.objectid,
1580 btrfs_disk_key_objectid(&first));
1582 if (need_resched()) {
1583 mutex_unlock(&extent_root->fs_info->alloc_mutex);
1585 mutex_lock(&extent_root->fs_info->alloc_mutex);
1588 btrfs_free_path(path);
1592 static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
1593 int is_data, int pending)
1597 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
1599 struct extent_buffer *buf;
1604 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
1606 /* we can reuse a block if it hasn't been written
1607 * and it is from this transaction. We can't
1608 * reuse anything from the tree log root because
1609 * it has tiny sub-transactions.
1611 if (btrfs_buffer_uptodate(buf, 0) &&
1612 btrfs_try_tree_lock(buf)) {
1614 root->fs_info->running_transaction->transid;
1615 u64 header_transid =
1616 btrfs_header_generation(buf);
1617 if (btrfs_header_owner(buf) !=
1618 BTRFS_TREE_LOG_OBJECTID &&
1619 header_transid == transid &&
1620 !btrfs_header_flag(buf,
1621 BTRFS_HEADER_FLAG_WRITTEN)) {
1622 clean_tree_block(NULL, root, buf);
1623 btrfs_tree_unlock(buf);
1624 free_extent_buffer(buf);
1627 btrfs_tree_unlock(buf);
1629 free_extent_buffer(buf);
1632 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
1634 set_extent_bits(&root->fs_info->pending_del,
1635 bytenr, bytenr + num_bytes - 1,
1636 EXTENT_LOCKED, GFP_NOFS);
1643 * remove an extent from the root, returns 0 on success
1645 static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
1646 *root, u64 bytenr, u64 num_bytes,
1647 u64 root_objectid, u64 ref_generation,
1648 u64 owner_objectid, u64 owner_offset, int pin,
1651 struct btrfs_path *path;
1652 struct btrfs_key key;
1653 struct btrfs_fs_info *info = root->fs_info;
1654 struct btrfs_root *extent_root = info->extent_root;
1655 struct extent_buffer *leaf;
1657 int extent_slot = 0;
1658 int found_extent = 0;
1660 struct btrfs_extent_item *ei;
1663 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
1664 key.objectid = bytenr;
1665 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
1666 key.offset = num_bytes;
1667 path = btrfs_alloc_path();
1672 ret = lookup_extent_backref(trans, extent_root, path,
1673 bytenr, root_objectid,
1675 owner_objectid, owner_offset, 1);
1677 struct btrfs_key found_key;
1678 extent_slot = path->slots[0];
1679 while(extent_slot > 0) {
1681 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1683 if (found_key.objectid != bytenr)
1685 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
1686 found_key.offset == num_bytes) {
1690 if (path->slots[0] - extent_slot > 5)
1694 ret = btrfs_del_item(trans, extent_root, path);
1696 btrfs_print_leaf(extent_root, path->nodes[0]);
1698 printk("Unable to find ref byte nr %Lu root %Lu "
1699 " gen %Lu owner %Lu offset %Lu\n", bytenr,
1700 root_objectid, ref_generation, owner_objectid,
1703 if (!found_extent) {
1704 btrfs_release_path(extent_root, path);
1705 ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
1709 extent_slot = path->slots[0];
1712 leaf = path->nodes[0];
1713 ei = btrfs_item_ptr(leaf, extent_slot,
1714 struct btrfs_extent_item);
1715 refs = btrfs_extent_refs(leaf, ei);
1718 btrfs_set_extent_refs(leaf, ei, refs);
1720 btrfs_mark_buffer_dirty(leaf);
1722 if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
1723 /* if the back ref and the extent are next to each other
1724 * they get deleted below in one shot
1726 path->slots[0] = extent_slot;
1728 } else if (found_extent) {
1729 /* otherwise delete the extent back ref */
1730 ret = btrfs_del_item(trans, extent_root, path);
1732 /* if refs are 0, we need to set up the path for deletion */
1734 btrfs_release_path(extent_root, path);
1735 ret = btrfs_search_slot(trans, extent_root, &key, path,
1746 #ifdef BIO_RW_DISCARD
1747 u64 map_length = num_bytes;
1748 struct btrfs_multi_bio *multi = NULL;
1752 ret = pin_down_bytes(root, bytenr, num_bytes,
1753 owner_objectid >= BTRFS_FIRST_FREE_OBJECTID, 0);
1759 /* block accounting for super block */
1760 spin_lock_irq(&info->delalloc_lock);
1761 super_used = btrfs_super_bytes_used(&info->super_copy);
1762 btrfs_set_super_bytes_used(&info->super_copy,
1763 super_used - num_bytes);
1764 spin_unlock_irq(&info->delalloc_lock);
1766 /* block accounting for root item */
1767 root_used = btrfs_root_used(&root->root_item);
1768 btrfs_set_root_used(&root->root_item,
1769 root_used - num_bytes);
1770 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
1775 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
1779 #ifdef BIO_RW_DISCARD
1780 /* Tell the block device(s) that the sectors can be discarded */
1781 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
1782 bytenr, &map_length, &multi, 0);
1784 struct btrfs_bio_stripe *stripe = multi->stripes;
1787 if (map_length > num_bytes)
1788 map_length = num_bytes;
1790 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1791 blkdev_issue_discard(stripe->dev->bdev,
1792 stripe->physical >> 9,
1799 btrfs_free_path(path);
1800 finish_current_insert(trans, extent_root);
1805 * find all the blocks marked as pending in the pending_del tree and
1806 * remove them from the extent map
1808 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
1809 btrfs_root *extent_root)
1815 struct extent_io_tree *pending_del;
1816 struct extent_io_tree *pinned_extents;
1818 WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
1819 pending_del = &extent_root->fs_info->pending_del;
1820 pinned_extents = &extent_root->fs_info->pinned_extents;
1823 ret = find_first_extent_bit(pending_del, 0, &start, &end,
1827 clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
1829 if (!test_range_bit(&extent_root->fs_info->extent_ins,
1830 start, end, EXTENT_LOCKED, 0)) {
1831 btrfs_update_pinned_extents(extent_root, start,
1832 end + 1 - start, 1);
1833 ret = __free_extent(trans, extent_root,
1834 start, end + 1 - start,
1835 extent_root->root_key.objectid,
1838 clear_extent_bits(&extent_root->fs_info->extent_ins,
1839 start, end, EXTENT_LOCKED, GFP_NOFS);
1844 if (need_resched()) {
1845 mutex_unlock(&extent_root->fs_info->alloc_mutex);
1847 mutex_lock(&extent_root->fs_info->alloc_mutex);
1854 * remove an extent from the root, returns 0 on success
1856 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
1857 struct btrfs_root *root, u64 bytenr,
1858 u64 num_bytes, u64 root_objectid,
1859 u64 ref_generation, u64 owner_objectid,
1860 u64 owner_offset, int pin)
1862 struct btrfs_root *extent_root = root->fs_info->extent_root;
1866 WARN_ON(num_bytes < root->sectorsize);
1867 if (!root->ref_cows)
1870 if (root == extent_root) {
1871 pin_down_bytes(root, bytenr, num_bytes, 0, 1);
1874 /* if metadata, always pin */
1875 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
1878 /* if data, pin when any transaction has committed this */
1879 if (ref_generation != trans->transid)
1882 ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid,
1883 ref_generation, owner_objectid, owner_offset,
1886 finish_current_insert(trans, root->fs_info->extent_root);
1887 pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
1888 return ret ? ret : pending_ret;
1891 int btrfs_free_extent(struct btrfs_trans_handle *trans,
1892 struct btrfs_root *root, u64 bytenr,
1893 u64 num_bytes, u64 root_objectid,
1894 u64 ref_generation, u64 owner_objectid,
1895 u64 owner_offset, int pin)
1899 maybe_lock_mutex(root);
1900 ret = __btrfs_free_extent(trans, root, bytenr, num_bytes,
1901 root_objectid, ref_generation,
1902 owner_objectid, owner_offset, pin);
1903 maybe_unlock_mutex(root);
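/* round val up to the next multiple of the root's stripesize */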
1907 static u64 stripe_align(struct btrfs_root *root, u64 val)
1909 u64 mask = ((u64)root->stripesize - 1);
1910 u64 ret = (val + mask) & ~mask;
1915 * walks the btree of allocated extents and finds a hole of a given size.
1916 * The key ins is changed to record the hole:
1917 * ins->objectid == block start
1918 * ins->type == BTRFS_EXTENT_ITEM_KEY
1919 * ins->offset == number of blocks
1920 * Any available blocks before search_start are skipped.
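 *
 * A minimal sketch of how a successful return would be consumed (the
 * argument values below are assumptions chosen for illustration, not
 * copied from a caller in this file):
 *
 *	struct btrfs_key ins;
 *	ret = find_free_extent(trans, root, num_bytes, 0, 0, (u64)-1, 0,
 *			       &ins, 0, 0, data);
 *	if (ret == 0) {
 *		start = ins.objectid;	(first byte of the free range)
 *		len = ins.offset;	(length in bytes, == num_bytes)
 *	}
 */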
1922 static int noinline find_free_extent(struct btrfs_trans_handle *trans,
1923 struct btrfs_root *orig_root,
1924 u64 num_bytes, u64 empty_size,
1925 u64 search_start, u64 search_end,
1926 u64 hint_byte, struct btrfs_key *ins,
1927 u64 exclude_start, u64 exclude_nr,
1931 u64 orig_search_start;
1932 struct btrfs_root * root = orig_root->fs_info->extent_root;
1933 struct btrfs_fs_info *info = root->fs_info;
1934 u64 total_needed = num_bytes;
1935 u64 *last_ptr = NULL;
1936 struct btrfs_block_group_cache *block_group;
1939 int chunk_alloc_done = 0;
1940 int empty_cluster = 2 * 1024 * 1024;
1941 int allowed_chunk_alloc = 0;
1943 WARN_ON(num_bytes < root->sectorsize);
1944 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
1946 if (orig_root->ref_cows || empty_size)
1947 allowed_chunk_alloc = 1;
1949 if (data & BTRFS_BLOCK_GROUP_METADATA) {
1950 last_ptr = &root->fs_info->last_alloc;
1951 empty_cluster = 256 * 1024;
1954 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
1955 last_ptr = &root->fs_info->last_data_alloc;
1957 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
1958 last_ptr = &root->fs_info->last_log_alloc;
1959 if (*last_ptr == 0 && root->fs_info->last_alloc) {
1960 *last_ptr = root->fs_info->last_alloc + empty_cluster;
1966 hint_byte = *last_ptr;
1968 empty_size += empty_cluster;
1972 search_start = max(search_start, first_logical_byte(root, 0));
1973 orig_search_start = search_start;
1975 if (search_end == (u64)-1)
1976 search_end = btrfs_super_total_bytes(&info->super_copy);
1979 block_group = btrfs_lookup_first_block_group(info, hint_byte);
1981 hint_byte = search_start;
1982 block_group = btrfs_find_block_group(root, block_group,
1983 hint_byte, data, 1);
1984 if (last_ptr && *last_ptr == 0 && block_group)
1985 hint_byte = block_group->key.objectid;
1987 block_group = btrfs_find_block_group(root,
1989 search_start, data, 1);
1991 search_start = max(search_start, hint_byte);
1993 total_needed += empty_size;
1997 block_group = btrfs_lookup_first_block_group(info,
2000 block_group = btrfs_lookup_first_block_group(info,
2003 if (full_scan && !chunk_alloc_done) {
2004 if (allowed_chunk_alloc) {
2005 do_chunk_alloc(trans, root,
2006 num_bytes + 2 * 1024 * 1024, data, 1);
2007 allowed_chunk_alloc = 0;
2008 } else if (block_group && block_group_bits(block_group, data)) {
2009 block_group->space_info->force_alloc = 1;
2011 chunk_alloc_done = 1;
2013 ret = find_search_start(root, &block_group, &search_start,
2014 total_needed, data);
2015 if (ret == -ENOSPC && last_ptr && *last_ptr) {
2017 block_group = btrfs_lookup_first_block_group(info,
2019 search_start = orig_search_start;
2020 ret = find_search_start(root, &block_group, &search_start,
2021 total_needed, data);
2028 if (last_ptr && *last_ptr && search_start != *last_ptr) {
2031 empty_size += empty_cluster;
2032 total_needed += empty_size;
2034 block_group = btrfs_lookup_first_block_group(info,
2036 search_start = orig_search_start;
2037 ret = find_search_start(root, &block_group,
2038 &search_start, total_needed, data);
2045 search_start = stripe_align(root, search_start);
2046 ins->objectid = search_start;
2047 ins->offset = num_bytes;
2049 if (ins->objectid + num_bytes >= search_end)
2052 if (ins->objectid + num_bytes >
2053 block_group->key.objectid + block_group->key.offset) {
2054 search_start = block_group->key.objectid +
2055 block_group->key.offset;
2059 if (test_range_bit(&info->extent_ins, ins->objectid,
2060 ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) {
2061 search_start = ins->objectid + num_bytes;
2065 if (test_range_bit(&info->pinned_extents, ins->objectid,
2066 ins->objectid + num_bytes -1, EXTENT_DIRTY, 0)) {
2067 search_start = ins->objectid + num_bytes;
2071 if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
2072 ins->objectid < exclude_start + exclude_nr)) {
2073 search_start = exclude_start + exclude_nr;
2077 if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
2078 block_group = btrfs_lookup_block_group(info, ins->objectid);
2080 trans->block_group = block_group;
2082 ins->offset = num_bytes;
2084 *last_ptr = ins->objectid + ins->offset;
2086 btrfs_super_total_bytes(&root->fs_info->super_copy)) {
2093 if (search_start + num_bytes >= search_end) {
2095 search_start = orig_search_start;
2102 total_needed -= empty_size;
2107 block_group = btrfs_lookup_first_block_group(info, search_start);
2109 block_group = btrfs_find_block_group(root, block_group,
2110 search_start, data, 0);
2117 static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2118 struct btrfs_root *root,
2119 u64 num_bytes, u64 min_alloc_size,
2120 u64 empty_size, u64 hint_byte,
2121 u64 search_end, struct btrfs_key *ins,
2125 u64 search_start = 0;
2127 struct btrfs_fs_info *info = root->fs_info;
2130 alloc_profile = info->avail_data_alloc_bits &
2131 info->data_alloc_profile;
2132 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2133 } else if (root == root->fs_info->chunk_root) {
2134 alloc_profile = info->avail_system_alloc_bits &
2135 info->system_alloc_profile;
2136 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2138 alloc_profile = info->avail_metadata_alloc_bits &
2139 info->metadata_alloc_profile;
2140 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2143 data = reduce_alloc_profile(root, data);
2145 * the only place that sets empty_size is btrfs_realloc_node, which
2146 * is not called recursively on allocations
2148 if (empty_size || root->ref_cows) {
2149 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
2150 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2152 BTRFS_BLOCK_GROUP_METADATA |
2153 (info->metadata_alloc_profile &
2154 info->avail_metadata_alloc_bits), 0);
2157 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2158 num_bytes + 2 * 1024 * 1024, data, 0);
2162 WARN_ON(num_bytes < root->sectorsize);
2163 ret = find_free_extent(trans, root, num_bytes, empty_size,
2164 search_start, search_end, hint_byte, ins,
2165 trans->alloc_exclude_start,
2166 trans->alloc_exclude_nr, data);
2168 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
2169 num_bytes = num_bytes >> 1;
2170 num_bytes = max(num_bytes, min_alloc_size);
2171 do_chunk_alloc(trans, root->fs_info->extent_root,
2172 num_bytes, data, 1);
2176 printk("allocation failed flags %Lu\n", data);
2179 clear_extent_dirty(&root->fs_info->free_space_cache,
2180 ins->objectid, ins->objectid + ins->offset - 1,
2185 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
2187 maybe_lock_mutex(root);
2188 set_extent_dirty(&root->fs_info->free_space_cache,
2189 start, start + len - 1, GFP_NOFS);
2190 maybe_unlock_mutex(root);
2194 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2195 struct btrfs_root *root,
2196 u64 num_bytes, u64 min_alloc_size,
2197 u64 empty_size, u64 hint_byte,
2198 u64 search_end, struct btrfs_key *ins,
2202 maybe_lock_mutex(root);
2203 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
2204 empty_size, hint_byte, search_end, ins,
2206 maybe_unlock_mutex(root);
2210 static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2211 struct btrfs_root *root,
2212 u64 root_objectid, u64 ref_generation,
2213 u64 owner, u64 owner_offset,
2214 struct btrfs_key *ins)
2220 u64 num_bytes = ins->offset;
2222 struct btrfs_fs_info *info = root->fs_info;
2223 struct btrfs_root *extent_root = info->extent_root;
2224 struct btrfs_extent_item *extent_item;
2225 struct btrfs_extent_ref *ref;
2226 struct btrfs_path *path;
2227 struct btrfs_key keys[2];
2229 /* block accounting for super block */
2230 spin_lock_irq(&info->delalloc_lock);
2231 super_used = btrfs_super_bytes_used(&info->super_copy);
2232 btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
2233 spin_unlock_irq(&info->delalloc_lock);
2235 /* block accounting for root item */
2236 root_used = btrfs_root_used(&root->root_item);
2237 btrfs_set_root_used(&root->root_item, root_used + num_bytes);
2239 if (root == extent_root) {
2240 set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
2241 ins->objectid + ins->offset - 1,
2242 EXTENT_LOCKED, GFP_NOFS);
2246 memcpy(&keys[0], ins, sizeof(*ins));
2247 keys[1].offset = hash_extent_ref(root_objectid, ref_generation,
2248 owner, owner_offset);
2249 keys[1].objectid = ins->objectid;
2250 keys[1].type = BTRFS_EXTENT_REF_KEY;
2251 sizes[0] = sizeof(*extent_item);
2252 sizes[1] = sizeof(*ref);
2254 path = btrfs_alloc_path();
2257 ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
2261 extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2262 struct btrfs_extent_item);
2263 btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
2264 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
2265 struct btrfs_extent_ref);
2267 btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
2268 btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
2269 btrfs_set_ref_objectid(path->nodes[0], ref, owner);
2270 btrfs_set_ref_offset(path->nodes[0], ref, owner_offset);
2272 btrfs_mark_buffer_dirty(path->nodes[0]);
2274 trans->alloc_exclude_start = 0;
2275 trans->alloc_exclude_nr = 0;
2276 btrfs_free_path(path);
2277 finish_current_insert(trans, extent_root);
2278 pending_ret = del_pending_extents(trans, extent_root);
2288 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
2290 printk("update block group failed for %Lu %Lu\n",
2291 ins->objectid, ins->offset);
2298 int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2299 struct btrfs_root *root,
2300 u64 root_objectid, u64 ref_generation,
2301 u64 owner, u64 owner_offset,
2302 struct btrfs_key *ins)
2305 maybe_lock_mutex(root);
2306 ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
2307 ref_generation, owner,
2309 maybe_unlock_mutex(root);
2314 * this is used by the tree logging recovery code. It records that
2315 * an extent has been allocated and makes sure to clear the free
2316 * space cache bits as well
2318 int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
2319 struct btrfs_root *root,
2320 u64 root_objectid, u64 ref_generation,
2321 u64 owner, u64 owner_offset,
2322 struct btrfs_key *ins)
2325 struct btrfs_block_group_cache *block_group;
2327 maybe_lock_mutex(root);
2328 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
2329 cache_block_group(root, block_group);
2331 clear_extent_dirty(&root->fs_info->free_space_cache,
2332 ins->objectid, ins->objectid + ins->offset - 1,
2334 ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
2335 ref_generation, owner,
2337 maybe_unlock_mutex(root);
2342 * finds a free extent and does all the dirty work required for allocation;
2343 * the key for the allocated extent is returned through ins.
2346 * returns 0 if everything worked, non-zero otherwise.
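 *
 * A hedged usage sketch for a data allocation (the owner fields and sizes
 * below are assumptions for illustration, not copied from a caller):
 *
 *	struct btrfs_key ins;
 *	ret = btrfs_alloc_extent(trans, root, num_bytes, root->sectorsize,
 *				 root->root_key.objectid, trans->transid,
 *				 inode_objectid, file_offset, 0, 0, (u64)-1,
 *				 &ins, BTRFS_BLOCK_GROUP_DATA);
 *
 * on success ins.objectid and ins.offset describe the new extent, and the
 * extent item plus its backref have already been inserted for it.
 */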
2348 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
2349 struct btrfs_root *root,
2350 u64 num_bytes, u64 min_alloc_size,
2351 u64 root_objectid, u64 ref_generation,
2352 u64 owner, u64 owner_offset,
2353 u64 empty_size, u64 hint_byte,
2354 u64 search_end, struct btrfs_key *ins, u64 data)
2358 maybe_lock_mutex(root);
2360 ret = __btrfs_reserve_extent(trans, root, num_bytes,
2361 min_alloc_size, empty_size, hint_byte,
2362 search_end, ins, data);
2364 ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
2365 ref_generation, owner,
2369 maybe_unlock_mutex(root);
2373 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
2374 struct btrfs_root *root,
2375 u64 bytenr, u32 blocksize)
2377 struct extent_buffer *buf;
2379 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
2381 return ERR_PTR(-ENOMEM);
2382 btrfs_set_header_generation(buf, trans->transid);
2383 btrfs_tree_lock(buf);
2384 clean_tree_block(trans, root, buf);
2385 btrfs_set_buffer_uptodate(buf);
2386 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
2387 buf->start + buf->len - 1, GFP_NOFS);
2388 trans->blocks_used++;
2393 * helper function to allocate a block for a given tree
2394 * returns the tree buffer or NULL.
2396 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
2397 struct btrfs_root *root,
2406 struct btrfs_key ins;
2408 struct extent_buffer *buf;
2410 ret = btrfs_alloc_extent(trans, root, blocksize, blocksize,
2411 root_objectid, ref_generation,
2412 level, first_objectid, empty_size, hint,
2416 return ERR_PTR(ret);
2419 buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
2423 int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
2424 struct btrfs_root *root, struct extent_buffer *leaf)
2427 u64 leaf_generation;
2428 struct btrfs_key key;
2429 struct btrfs_file_extent_item *fi;
2434 BUG_ON(!btrfs_is_leaf(leaf));
2435 nritems = btrfs_header_nritems(leaf);
2436 leaf_owner = btrfs_header_owner(leaf);
2437 leaf_generation = btrfs_header_generation(leaf);
2439 for (i = 0; i < nritems; i++) {
2443 btrfs_item_key_to_cpu(leaf, &key, i);
2444 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2446 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
2447 if (btrfs_file_extent_type(leaf, fi) ==
2448 BTRFS_FILE_EXTENT_INLINE)
2451 * FIXME make sure to insert a trans record that
2452 * repeats the snapshot del on crash
2454 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
2455 if (disk_bytenr == 0)
2458 mutex_lock(&root->fs_info->alloc_mutex);
2459 ret = __btrfs_free_extent(trans, root, disk_bytenr,
2460 btrfs_file_extent_disk_num_bytes(leaf, fi),
2461 leaf_owner, leaf_generation,
2462 key.objectid, key.offset, 0);
2463 mutex_unlock(&root->fs_info->alloc_mutex);
2465 atomic_inc(&root->fs_info->throttle_gen);
2466 wake_up(&root->fs_info->transaction_throttle);
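/*
 * the loop above drops the on-disk reference for every regular file
 * extent in the leaf.  Inline extents and holes (disk_bytenr == 0)
 * have nothing backing them and are skipped, and the transaction
 * throttle is poked after each free so waiters can make progress.
 */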
2474 static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
2475 struct btrfs_root *root,
2476 struct btrfs_leaf_ref *ref)
2480 struct btrfs_extent_info *info = ref->extents;
2482 for (i = 0; i < ref->nritems; i++) {
2483 mutex_lock(&root->fs_info->alloc_mutex);
2484 ret = __btrfs_free_extent(trans, root,
2485 info->bytenr, info->num_bytes,
2486 ref->owner, ref->generation,
2487 info->objectid, info->offset, 0);
2488 mutex_unlock(&root->fs_info->alloc_mutex);
2490 atomic_inc(&root->fs_info->throttle_gen);
2491 wake_up(&root->fs_info->transaction_throttle);
2501 int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
2506 ret = lookup_extent_ref(NULL, root, start, len, refs);
2509 #if 0 // some debugging code in case we see problems here
2510 /* if the ref count is one, it won't get increased again. But
2511 * if the ref count is > 1, someone may be decreasing it at
2512 * the same time we are.
2515 struct extent_buffer *eb = NULL;
2516 eb = btrfs_find_create_tree_block(root, start, len);
2518 btrfs_tree_lock(eb);
2520 mutex_lock(&root->fs_info->alloc_mutex);
2521 ret = lookup_extent_ref(NULL, root, start, len, refs);
2523 mutex_unlock(&root->fs_info->alloc_mutex);
2526 btrfs_tree_unlock(eb);
2527 free_extent_buffer(eb);
2530 printk("block %llu went down to one during drop_snap\n",
2531 (unsigned long long)start);
2542 * helper function for drop_snapshot, this walks down the tree dropping ref
2543 * counts as it goes.
2545 static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
2546 struct btrfs_root *root,
2547 struct btrfs_path *path, int *level)
2553 struct extent_buffer *next;
2554 struct extent_buffer *cur;
2555 struct extent_buffer *parent;
2556 struct btrfs_leaf_ref *ref;
2561 WARN_ON(*level < 0);
2562 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2563 ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
2564 path->nodes[*level]->len, &refs);
2570 * walk down to the last node level and free all the leaves
2572 while(*level >= 0) {
2573 WARN_ON(*level < 0);
2574 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2575 cur = path->nodes[*level];
2577 if (btrfs_header_level(cur) != *level)
2580 if (path->slots[*level] >=
2581 btrfs_header_nritems(cur))
2584 ret = btrfs_drop_leaf_ref(trans, root, cur);
2588 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2589 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2590 blocksize = btrfs_level_size(root, *level - 1);
2592 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
2595 parent = path->nodes[*level];
2596 root_owner = btrfs_header_owner(parent);
2597 root_gen = btrfs_header_generation(parent);
2598 path->slots[*level]++;
2600 mutex_lock(&root->fs_info->alloc_mutex);
2601 ret = __btrfs_free_extent(trans, root, bytenr,
2602 blocksize, root_owner,
2605 mutex_unlock(&root->fs_info->alloc_mutex);
2607 atomic_inc(&root->fs_info->throttle_gen);
2608 wake_up(&root->fs_info->transaction_throttle);
2614 * at this point, we have a single ref, and since the
2615 * only place referencing this extent is a dead root
2616 * the reference count should never go higher.
2617 * So, we don't need to check it again
2620 struct btrfs_key key;
2621 btrfs_node_key_to_cpu(cur, &key, path->slots[*level]);
2622 ref = btrfs_lookup_leaf_ref(root, bytenr);
2624 ret = cache_drop_leaf_ref(trans, root, ref);
2626 btrfs_remove_leaf_ref(root, ref);
2627 btrfs_free_leaf_ref(root, ref);
2631 if (printk_ratelimit())
2632 printk("leaf ref miss for bytenr %llu\n",
2633 (unsigned long long)bytenr);
2635 next = btrfs_find_tree_block(root, bytenr, blocksize);
2636 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
2637 free_extent_buffer(next);
2639 next = read_tree_block(root, bytenr, blocksize,
2644 * this is a debugging check and can go away
2645 * the ref should never go all the way down to 1
2648 ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
2654 WARN_ON(*level <= 0);
2655 if (path->nodes[*level-1])
2656 free_extent_buffer(path->nodes[*level-1]);
2657 path->nodes[*level-1] = next;
2658 *level = btrfs_header_level(next);
2659 path->slots[*level] = 0;
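/*
 * once the walk reaches a leaf its file extents are dropped directly;
 * otherwise each pass of the loop above either drops just this root's
 * reference when the child block is still shared, drops a cached leaf
 * ref without ever reading the leaf, or reads the child block in and
 * descends one more level.
 */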
2663 WARN_ON(*level < 0);
2664 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2666 if (path->nodes[*level] == root->node) {
2667 parent = path->nodes[*level];
2668 bytenr = path->nodes[*level]->start;
2670 parent = path->nodes[*level + 1];
2671 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
2674 blocksize = btrfs_level_size(root, *level);
2675 root_owner = btrfs_header_owner(parent);
2676 root_gen = btrfs_header_generation(parent);
2678 mutex_lock(&root->fs_info->alloc_mutex);
2679 ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
2680 root_owner, root_gen, 0, 0, 1);
2681 free_extent_buffer(path->nodes[*level]);
2682 path->nodes[*level] = NULL;
2685 mutex_unlock(&root->fs_info->alloc_mutex);
2692 * helper for dropping snapshots. This walks back up the tree in the path
2693 * to find the first node higher up where we haven't yet gone through all the slots
2696 static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
2697 struct btrfs_root *root,
2698 struct btrfs_path *path, int *level)
2702 struct btrfs_root_item *root_item = &root->root_item;
2707 for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2708 slot = path->slots[i];
2709 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
2710 struct extent_buffer *node;
2711 struct btrfs_disk_key disk_key;
2712 node = path->nodes[i];
2715 WARN_ON(*level == 0);
2716 btrfs_node_key(node, &disk_key, path->slots[i]);
2717 memcpy(&root_item->drop_progress,
2718 &disk_key, sizeof(disk_key));
2719 root_item->drop_level = i;
2722 if (path->nodes[*level] == root->node) {
2723 root_owner = root->root_key.objectid;
2725 btrfs_header_generation(path->nodes[*level]);
2727 struct extent_buffer *node;
2728 node = path->nodes[*level + 1];
2729 root_owner = btrfs_header_owner(node);
2730 root_gen = btrfs_header_generation(node);
2732 ret = btrfs_free_extent(trans, root,
2733 path->nodes[*level]->start,
2734 path->nodes[*level]->len,
2735 root_owner, root_gen, 0, 0, 1);
2737 free_extent_buffer(path->nodes[*level]);
2738 path->nodes[*level] = NULL;
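/*
 * a node whose slots have all been visited is freed and popped off the
 * path; when a node still has unvisited slots the current key is saved
 * in drop_progress/drop_level so the drop can resume from here later.
 */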
2746 * drop the reference count on the tree rooted at 'snap'. This traverses
2747 * the tree freeing any blocks that have a ref count of zero after being decremented
2750 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
2756 struct btrfs_path *path;
2759 struct btrfs_root_item *root_item = &root->root_item;
2761 WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
2762 path = btrfs_alloc_path();
2765 level = btrfs_header_level(root->node);
2767 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2768 path->nodes[level] = root->node;
2769 extent_buffer_get(root->node);
2770 path->slots[level] = 0;
2772 struct btrfs_key key;
2773 struct btrfs_disk_key found_key;
2774 struct extent_buffer *node;
2776 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2777 level = root_item->drop_level;
2778 path->lowest_level = level;
2779 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2784 node = path->nodes[level];
2785 btrfs_node_key(node, &found_key, path->slots[level]);
2786 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
2787 sizeof(found_key)));
2789 * unlock our path, this is safe because only this
2790 * function is allowed to delete this snapshot
2792 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
2793 if (path->nodes[i] && path->locks[i]) {
2795 btrfs_tree_unlock(path->nodes[i]);
2800 wret = walk_down_tree(trans, root, path, &level);
2806 wret = walk_up_tree(trans, root, path, &level);
2811 if (trans->transaction->in_commit) {
2815 atomic_inc(&root->fs_info->throttle_gen);
2816 wake_up(&root->fs_info->transaction_throttle);
2818 for (i = 0; i <= orig_level; i++) {
2819 if (path->nodes[i]) {
2820 free_extent_buffer(path->nodes[i]);
2821 path->nodes[i] = NULL;
2825 btrfs_free_path(path);
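/*
 * dropping a snapshot alternates walk_down_tree and walk_up_tree until
 * the whole tree has been freed, and bails out early when the running
 * transaction starts to commit; the drop_progress key saved in
 * walk_up_tree lets a later transaction pick up where this one left off.
 */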
2829 int btrfs_free_block_groups(struct btrfs_fs_info *info)
2836 mutex_lock(&info->alloc_mutex);
2838 ret = find_first_extent_bit(&info->block_group_cache, 0,
2839 &start, &end, (unsigned int)-1);
2842 ret = get_state_private(&info->block_group_cache, start, &ptr);
2844 kfree((void *)(unsigned long)ptr);
2845 clear_extent_bits(&info->block_group_cache, start,
2846 end, (unsigned int)-1, GFP_NOFS);
2849 ret = find_first_extent_bit(&info->free_space_cache, 0,
2850 &start, &end, EXTENT_DIRTY);
2853 clear_extent_dirty(&info->free_space_cache, start,
2856 mutex_unlock(&info->alloc_mutex);
2860 static unsigned long calc_ra(unsigned long start, unsigned long last,
2863 return min(last, start + nr - 1);
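/*
 * the readahead window is clamped so it never runs past the last page
 * we care about, e.g. start 100, nr 32, last 110 gives 110 rather
 * than 131.
 */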
2866 static int noinline relocate_inode_pages(struct inode *inode, u64 start,
2871 unsigned long last_index;
2874 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2875 struct file_ra_state *ra;
2876 unsigned long total_read = 0;
2877 unsigned long ra_pages;
2878 struct btrfs_ordered_extent *ordered;
2879 struct btrfs_trans_handle *trans;
2881 ra = kzalloc(sizeof(*ra), GFP_NOFS);
2883 mutex_lock(&inode->i_mutex);
2884 i = start >> PAGE_CACHE_SHIFT;
2885 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
2887 ra_pages = BTRFS_I(inode)->root->fs_info->bdi.ra_pages;
2889 file_ra_state_init(ra, inode->i_mapping);
2891 for (; i <= last_index; i++) {
2892 if (total_read % ra_pages == 0) {
2893 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
2894 calc_ra(i, last_index, ra_pages));
2898 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
2899 goto truncate_racing;
2900 page = grab_cache_page(inode->i_mapping, i);
2904 if (!PageUptodate(page)) {
2905 btrfs_readpage(NULL, page);
2907 if (!PageUptodate(page)) {
2909 page_cache_release(page);
2913 wait_on_page_writeback(page);
2915 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2916 page_end = page_start + PAGE_CACHE_SIZE - 1;
2917 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2919 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2921 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2923 page_cache_release(page);
2924 btrfs_start_ordered_extent(inode, ordered, 1);
2925 btrfs_put_ordered_extent(ordered);
2928 set_page_extent_mapped(page);
2931 * make sure page_mkwrite is called for this page if userland
2932 * wants to change it from mmap
2934 clear_page_dirty_for_io(page);
2936 btrfs_set_extent_delalloc(inode, page_start, page_end);
2937 set_page_dirty(page);
2939 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2941 page_cache_release(page);
2945 /* we have to start the IO in order to get the ordered extents
2946 * instantiated. This allows the relocation code to wait
2947 * for all the ordered extents to hit the disk.
2949 * Otherwise, it would constantly loop over the same extents
2950 * because the old ones don't get deleted until the IO is done
2953 btrfs_fdatawrite_range(inode->i_mapping, start, start + len - 1,
2956 trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
2958 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
2959 mark_inode_dirty(inode);
2961 mutex_unlock(&inode->i_mutex);
2965 vmtruncate(inode, inode->i_size);
2966 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
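/*
 * relocation of file data works by dirtying every page in the range as
 * delalloc, so that writeback rewrites the data into new extents
 * elsewhere.  If the file is truncated underneath us, the
 * truncate_racing path stops early since the pages past i_size are gone.
 */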
2972 * The back references tell us which tree holds a ref on a block,
2973 * but it is possible for the tree root field in the reference to
2974 * reflect the original root before a snapshot was made. In this
2975 * case we should search through all the children of a given root
2976 * to find potential holders of references on a block.
2978 * Instead, we do something a little less fancy and just search
2979 * all the roots for a given key/block combination.
2981 static int find_root_for_ref(struct btrfs_root *root,
2982 struct btrfs_path *path,
2983 struct btrfs_key *key0,
2986 struct btrfs_root **found_root,
2989 struct btrfs_key root_location;
2990 struct btrfs_root *cur_root = *found_root;
2991 struct btrfs_file_extent_item *file_extent;
2992 u64 root_search_start = BTRFS_FS_TREE_OBJECTID;
2996 root_location.offset = (u64)-1;
2997 root_location.type = BTRFS_ROOT_ITEM_KEY;
2998 path->lowest_level = level;
3001 ret = btrfs_search_slot(NULL, cur_root, key0, path, 0, 0);
3003 if (ret == 0 && file_key) {
3004 struct extent_buffer *leaf = path->nodes[0];
3005 file_extent = btrfs_item_ptr(leaf, path->slots[0],
3006 struct btrfs_file_extent_item);
3007 if (btrfs_file_extent_type(leaf, file_extent) ==
3008 BTRFS_FILE_EXTENT_REG) {
3010 btrfs_file_extent_disk_bytenr(leaf,
3013 } else if (!file_key) {
3014 if (path->nodes[level])
3015 found_bytenr = path->nodes[level]->start;
3018 btrfs_release_path(cur_root, path);
3020 if (found_bytenr == bytenr) {
3021 *found_root = cur_root;
3025 ret = btrfs_search_root(root->fs_info->tree_root,
3026 root_search_start, &root_search_start);
3030 root_location.objectid = root_search_start;
3031 cur_root = btrfs_read_fs_root_no_name(root->fs_info,
3039 path->lowest_level = 0;
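/*
 * the loop above tries the root we were handed first and then walks
 * every fs root recorded in the tree of tree roots, stopping at the
 * first one whose copy of the key actually points at the block being
 * relocated.
 */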
3044 * note, this releases the path
3046 static int noinline relocate_one_reference(struct btrfs_root *extent_root,
3047 struct btrfs_path *path,
3048 struct btrfs_key *extent_key,
3049 u64 *last_file_objectid,
3050 u64 *last_file_offset,
3051 u64 *last_file_root,
3054 struct inode *inode;
3055 struct btrfs_root *found_root;
3056 struct btrfs_key root_location;
3057 struct btrfs_key found_key;
3058 struct btrfs_extent_ref *ref;
3066 WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
3068 ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
3069 struct btrfs_extent_ref);
3070 ref_root = btrfs_ref_root(path->nodes[0], ref);
3071 ref_gen = btrfs_ref_generation(path->nodes[0], ref);
3072 ref_objectid = btrfs_ref_objectid(path->nodes[0], ref);
3073 ref_offset = btrfs_ref_offset(path->nodes[0], ref);
3074 btrfs_release_path(extent_root, path);
3076 root_location.objectid = ref_root;
3078 root_location.offset = 0;
3080 root_location.offset = (u64)-1;
3081 root_location.type = BTRFS_ROOT_ITEM_KEY;
3083 found_root = btrfs_read_fs_root_no_name(extent_root->fs_info,
3085 BUG_ON(!found_root);
3086 mutex_unlock(&extent_root->fs_info->alloc_mutex);
3088 if (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
3089 found_key.objectid = ref_objectid;
3090 found_key.type = BTRFS_EXTENT_DATA_KEY;
3091 found_key.offset = ref_offset;
3094 if (last_extent == extent_key->objectid &&
3095 *last_file_objectid == ref_objectid &&
3096 *last_file_offset == ref_offset &&
3097 *last_file_root == ref_root)
3100 ret = find_root_for_ref(extent_root, path, &found_key,
3101 level, 1, &found_root,
3102 extent_key->objectid);
3107 if (last_extent == extent_key->objectid &&
3108 *last_file_objectid == ref_objectid &&
3109 *last_file_offset == ref_offset &&
3110 *last_file_root == ref_root)
3113 inode = btrfs_iget_locked(extent_root->fs_info->sb,
3114 ref_objectid, found_root);
3115 if (inode->i_state & I_NEW) {
3116 /* the inode and parent dir are two different roots */
3117 BTRFS_I(inode)->root = found_root;
3118 BTRFS_I(inode)->location.objectid = ref_objectid;
3119 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
3120 BTRFS_I(inode)->location.offset = 0;
3121 btrfs_read_locked_inode(inode);
3122 unlock_new_inode(inode);
3125 /* this can happen if the reference is not against
3126 * the latest version of the tree root
3128 if (is_bad_inode(inode))
3131 *last_file_objectid = inode->i_ino;
3132 *last_file_root = found_root->root_key.objectid;
3133 *last_file_offset = ref_offset;
3135 relocate_inode_pages(inode, ref_offset, extent_key->offset);
3138 struct btrfs_trans_handle *trans;
3139 struct extent_buffer *eb;
3142 eb = read_tree_block(found_root, extent_key->objectid,
3143 extent_key->offset, 0);
3144 btrfs_tree_lock(eb);
3145 level = btrfs_header_level(eb);
3148 btrfs_item_key_to_cpu(eb, &found_key, 0);
3150 btrfs_node_key_to_cpu(eb, &found_key, 0);
3152 btrfs_tree_unlock(eb);
3153 free_extent_buffer(eb);
3155 ret = find_root_for_ref(extent_root, path, &found_key,
3156 level, 0, &found_root,
3157 extent_key->objectid);
3163 * right here almost anything could happen to our key,
3164 * but that's ok. The cow below will either relocate it
3165 * or someone else will have relocated it. Either way,
3166 * it is in a different spot than it was before and we're happy with that
3170 trans = btrfs_start_transaction(found_root, 1);
3172 if (found_root == extent_root->fs_info->extent_root ||
3173 found_root == extent_root->fs_info->chunk_root ||
3174 found_root == extent_root->fs_info->dev_root) {
3176 mutex_lock(&extent_root->fs_info->alloc_mutex);
3179 path->lowest_level = level;
3181 ret = btrfs_search_slot(trans, found_root, &found_key, path,
3183 path->lowest_level = 0;
3184 btrfs_release_path(found_root, path);
3186 if (found_root == found_root->fs_info->extent_root)
3187 btrfs_extent_post_op(trans, found_root);
3189 mutex_unlock(&extent_root->fs_info->alloc_mutex);
3191 btrfs_end_transaction(trans, found_root);
3195 mutex_lock(&extent_root->fs_info->alloc_mutex);
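/*
 * a reference held by a file (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID)
 * is relocated by paging the data in and re-dirtying it, while a
 * reference held by a tree block is relocated by cowing the path down
 * to that block in whichever root really owns it.
 */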
3199 static int noinline del_extent_zero(struct btrfs_root *extent_root,
3200 struct btrfs_path *path,
3201 struct btrfs_key *extent_key)
3204 struct btrfs_trans_handle *trans;
3206 trans = btrfs_start_transaction(extent_root, 1);
3207 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
3214 ret = btrfs_del_item(trans, extent_root, path);
3216 btrfs_end_transaction(trans, extent_root);
3220 static int noinline relocate_one_extent(struct btrfs_root *extent_root,
3221 struct btrfs_path *path,
3222 struct btrfs_key *extent_key)
3224 struct btrfs_key key;
3225 struct btrfs_key found_key;
3226 struct extent_buffer *leaf;
3227 u64 last_file_objectid = 0;
3228 u64 last_file_root = 0;
3229 u64 last_file_offset = (u64)-1;
3230 u64 last_extent = 0;
3235 if (extent_key->objectid == 0) {
3236 ret = del_extent_zero(extent_root, path, extent_key);
3239 key.objectid = extent_key->objectid;
3240 key.type = BTRFS_EXTENT_REF_KEY;
3244 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3250 leaf = path->nodes[0];
3251 nritems = btrfs_header_nritems(leaf);
3252 if (path->slots[0] == nritems) {
3253 ret = btrfs_next_leaf(extent_root, path);
3260 leaf = path->nodes[0];
3263 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3264 if (found_key.objectid != extent_key->objectid) {
3268 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
3272 key.offset = found_key.offset + 1;
3273 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3275 ret = relocate_one_reference(extent_root, path, extent_key,
3276 &last_file_objectid,
3278 &last_file_root, last_extent);
3281 last_extent = extent_key->objectid;
3285 btrfs_release_path(extent_root, path);
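/*
 * the loop above walks every EXTENT_REF item recorded for this extent
 * and hands each backref to relocate_one_reference; an extent item at
 * objectid zero is simply deleted (del_extent_zero) instead of
 * relocated.
 */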
3289 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
3292 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
3293 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
3295 num_devices = root->fs_info->fs_devices->num_devices;
3296 if (num_devices == 1) {
3297 stripped |= BTRFS_BLOCK_GROUP_DUP;
3298 stripped = flags & ~stripped;
3300 /* turn raid0 into single device chunks */
3301 if (flags & BTRFS_BLOCK_GROUP_RAID0)
3304 /* turn mirroring into duplication */
3305 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3306 BTRFS_BLOCK_GROUP_RAID10))
3307 return stripped | BTRFS_BLOCK_GROUP_DUP;
3310 /* they already had raid on here, just return */
3311 if (flags & stripped)
3314 stripped |= BTRFS_BLOCK_GROUP_DUP;
3315 stripped = flags & ~stripped;
3317 /* switch duplicated blocks with raid1 */
3318 if (flags & BTRFS_BLOCK_GROUP_DUP)
3319 return stripped | BTRFS_BLOCK_GROUP_RAID1;
3321 /* turn single device chunks into raid0 */
3322 return stripped | BTRFS_BLOCK_GROUP_RAID0;
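/*
 * examples of the conversions above: with a single device left, RAID0
 * falls back to plain single chunks and RAID1/RAID10 fall back to DUP;
 * with more devices available, DUP is upgraded to RAID1 and single
 * chunks are upgraded to RAID0.
 */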
3327 int __alloc_chunk_for_shrink(struct btrfs_root *root,
3328 struct btrfs_block_group_cache *shrink_block_group,
3331 struct btrfs_trans_handle *trans;
3332 u64 new_alloc_flags;
3335 spin_lock(&shrink_block_group->lock);
3336 if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
3337 spin_unlock(&shrink_block_group->lock);
3338 mutex_unlock(&root->fs_info->alloc_mutex);
3340 trans = btrfs_start_transaction(root, 1);
3341 mutex_lock(&root->fs_info->alloc_mutex);
3342 spin_lock(&shrink_block_group->lock);
3344 new_alloc_flags = update_block_group_flags(root,
3345 shrink_block_group->flags);
3346 if (new_alloc_flags != shrink_block_group->flags) {
3348 btrfs_block_group_used(&shrink_block_group->item);
3350 calc = shrink_block_group->key.offset;
3352 spin_unlock(&shrink_block_group->lock);
3354 do_chunk_alloc(trans, root->fs_info->extent_root,
3355 calc + 2 * 1024 * 1024, new_alloc_flags, force);
3357 mutex_unlock(&root->fs_info->alloc_mutex);
3358 btrfs_end_transaction(trans, root);
3359 mutex_lock(&root->fs_info->alloc_mutex);
3361 spin_unlock(&shrink_block_group->lock);
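/*
 * while a block group is being emptied we make sure a chunk with the
 * post-relocation profile exists for the relocated extents to land in;
 * the alloc_mutex is dropped around the transaction start and end
 * calls above.
 */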
3365 int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 shrink_start)
3367 struct btrfs_trans_handle *trans;
3368 struct btrfs_root *tree_root = root->fs_info->tree_root;
3369 struct btrfs_path *path;
3372 u64 shrink_last_byte;
3373 struct btrfs_block_group_cache *shrink_block_group;
3374 struct btrfs_fs_info *info = root->fs_info;
3375 struct btrfs_key key;
3376 struct btrfs_key found_key;
3377 struct extent_buffer *leaf;
3382 mutex_lock(&root->fs_info->alloc_mutex);
3383 shrink_block_group = btrfs_lookup_block_group(root->fs_info,
3385 BUG_ON(!shrink_block_group);
3387 shrink_last_byte = shrink_block_group->key.objectid +
3388 shrink_block_group->key.offset;
3390 shrink_block_group->space_info->total_bytes -=
3391 shrink_block_group->key.offset;
3392 path = btrfs_alloc_path();
3393 root = root->fs_info->extent_root;
3396 printk("btrfs relocating block group %llu flags %llu\n",
3397 (unsigned long long)shrink_start,
3398 (unsigned long long)shrink_block_group->flags);
3400 __alloc_chunk_for_shrink(root, shrink_block_group, 1);
3404 shrink_block_group->ro = 1;
3408 key.objectid = shrink_start;
3411 cur_byte = key.objectid;
3413 mutex_unlock(&root->fs_info->alloc_mutex);
3415 btrfs_start_delalloc_inodes(root);
3416 btrfs_wait_ordered_extents(tree_root, 0);
3418 mutex_lock(&root->fs_info->alloc_mutex);
3420 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3424 ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
3429 leaf = path->nodes[0];
3430 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3431 if (found_key.objectid + found_key.offset > shrink_start &&
3432 found_key.objectid < shrink_last_byte) {
3433 cur_byte = found_key.objectid;
3434 key.objectid = cur_byte;
3437 btrfs_release_path(root, path);
3440 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3445 leaf = path->nodes[0];
3446 nritems = btrfs_header_nritems(leaf);
3447 if (path->slots[0] >= nritems) {
3448 ret = btrfs_next_leaf(root, path);
3455 leaf = path->nodes[0];
3456 nritems = btrfs_header_nritems(leaf);
3459 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3461 if (found_key.objectid >= shrink_last_byte)
3464 if (progress && need_resched()) {
3465 memcpy(&key, &found_key, sizeof(key));
3467 btrfs_release_path(root, path);
3468 btrfs_search_slot(NULL, root, &key, path, 0, 0);
3474 if (btrfs_key_type(&found_key) != BTRFS_EXTENT_ITEM_KEY ||
3475 found_key.objectid + found_key.offset <= cur_byte) {
3476 memcpy(&key, &found_key, sizeof(key));
3483 cur_byte = found_key.objectid + found_key.offset;
3484 key.objectid = cur_byte;
3485 btrfs_release_path(root, path);
3486 ret = relocate_one_extent(root, path, &found_key);
3487 __alloc_chunk_for_shrink(root, shrink_block_group, 0);
3490 btrfs_release_path(root, path);
3492 if (total_found > 0) {
3493 printk("btrfs relocate found %llu last extent was %llu\n",
3494 (unsigned long long)total_found,
3495 (unsigned long long)found_key.objectid);
3496 mutex_unlock(&root->fs_info->alloc_mutex);
3497 trans = btrfs_start_transaction(tree_root, 1);
3498 btrfs_commit_transaction(trans, tree_root);
3500 btrfs_clean_old_snapshots(tree_root);
3502 btrfs_start_delalloc_inodes(root);
3503 btrfs_wait_ordered_extents(tree_root, 0);
3505 trans = btrfs_start_transaction(tree_root, 1);
3506 btrfs_commit_transaction(trans, tree_root);
3507 mutex_lock(&root->fs_info->alloc_mutex);
3512 * we've freed all the extents, now remove the block
3513 * group item from the tree
3515 mutex_unlock(&root->fs_info->alloc_mutex);
3517 trans = btrfs_start_transaction(root, 1);
3519 mutex_lock(&root->fs_info->alloc_mutex);
3520 memcpy(&key, &shrink_block_group->key, sizeof(key));
3522 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3526 btrfs_end_transaction(trans, root);
3530 clear_extent_bits(&info->block_group_cache, key.objectid,
3531 key.objectid + key.offset - 1,
3532 (unsigned int)-1, GFP_NOFS);
3535 clear_extent_bits(&info->free_space_cache,
3536 key.objectid, key.objectid + key.offset - 1,
3537 (unsigned int)-1, GFP_NOFS);
3540 memset(shrink_block_group, 0, sizeof(*shrink_block_group));
3541 kfree(shrink_block_group);
3544 btrfs_del_item(trans, root, path);
3545 btrfs_release_path(root, path);
3546 mutex_unlock(&root->fs_info->alloc_mutex);
3547 btrfs_commit_transaction(trans, root);
3549 mutex_lock(&root->fs_info->alloc_mutex);
3551 /* the code to unpin extents might set a few bits in the free
3552 * space cache for this range again
3554 clear_extent_bits(&info->free_space_cache,
3555 key.objectid, key.objectid + key.offset - 1,
3556 (unsigned int)-1, GFP_NOFS);
3558 btrfs_free_path(path);
3559 mutex_unlock(&root->fs_info->alloc_mutex);
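/*
 * shrinking walks every extent item inside the doomed block group,
 * relocates it and commits, repeating until nothing is left; only then
 * is the block group item deleted and its range cleared out of the
 * block group and free space caches.
 */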
3563 int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
3564 struct btrfs_key *key)
3567 struct btrfs_key found_key;
3568 struct extent_buffer *leaf;
3571 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
3576 slot = path->slots[0];
3577 leaf = path->nodes[0];
3578 if (slot >= btrfs_header_nritems(leaf)) {
3579 ret = btrfs_next_leaf(root, path);
3586 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3588 if (found_key.objectid >= key->objectid &&
3589 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
3600 int btrfs_read_block_groups(struct btrfs_root *root)
3602 struct btrfs_path *path;
3605 struct btrfs_block_group_cache *cache;
3606 struct btrfs_fs_info *info = root->fs_info;
3607 struct btrfs_space_info *space_info;
3608 struct extent_io_tree *block_group_cache;
3609 struct btrfs_key key;
3610 struct btrfs_key found_key;
3611 struct extent_buffer *leaf;
3613 block_group_cache = &info->block_group_cache;
3614 root = info->extent_root;
3617 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3618 path = btrfs_alloc_path();
3622 mutex_lock(&root->fs_info->alloc_mutex);
3624 ret = find_first_block_group(root, path, &key);
3632 leaf = path->nodes[0];
3633 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3634 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3640 spin_lock_init(&cache->lock);
3641 read_extent_buffer(leaf, &cache->item,
3642 btrfs_item_ptr_offset(leaf, path->slots[0]),
3643 sizeof(cache->item));
3644 memcpy(&cache->key, &found_key, sizeof(found_key));
3646 key.objectid = found_key.objectid + found_key.offset;
3647 btrfs_release_path(root, path);
3648 cache->flags = btrfs_block_group_flags(&cache->item);
3650 if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
3651 bit = BLOCK_GROUP_DATA;
3652 } else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
3653 bit = BLOCK_GROUP_SYSTEM;
3654 } else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
3655 bit = BLOCK_GROUP_METADATA;
3657 set_avail_alloc_bits(info, cache->flags);
3659 ret = update_space_info(info, cache->flags, found_key.offset,
3660 btrfs_block_group_used(&cache->item),
3663 cache->space_info = space_info;
3665 /* use EXTENT_LOCKED to prevent merging */
3666 set_extent_bits(block_group_cache, found_key.objectid,
3667 found_key.objectid + found_key.offset - 1,
3668 EXTENT_LOCKED, GFP_NOFS);
3669 set_state_private(block_group_cache, found_key.objectid,
3670 (unsigned long)cache);
3671 set_extent_bits(block_group_cache, found_key.objectid,
3672 found_key.objectid + found_key.offset - 1,
3673 bit | EXTENT_LOCKED, GFP_NOFS);
3675 btrfs_super_total_bytes(&info->super_copy))
3680 btrfs_free_path(path);
3681 mutex_unlock(&root->fs_info->alloc_mutex);
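/*
 * every block group item read from the extent tree gets an in-memory
 * cache struct; the struct is stashed as the private pointer for its
 * range in block_group_cache and the range is tagged with a bit that
 * says whether it holds data, metadata or system chunks.
 */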
3685 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
3686 struct btrfs_root *root, u64 bytes_used,
3687 u64 type, u64 chunk_objectid, u64 chunk_offset,
3692 struct btrfs_root *extent_root;
3693 struct btrfs_block_group_cache *cache;
3694 struct extent_io_tree *block_group_cache;
3696 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
3697 extent_root = root->fs_info->extent_root;
3698 block_group_cache = &root->fs_info->block_group_cache;
3700 root->fs_info->last_trans_new_blockgroup = trans->transid;
3702 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3704 cache->key.objectid = chunk_offset;
3705 cache->key.offset = size;
3706 spin_lock_init(&cache->lock);
3707 btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3709 btrfs_set_block_group_used(&cache->item, bytes_used);
3710 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
3711 cache->flags = type;
3712 btrfs_set_block_group_flags(&cache->item, type);
3714 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
3715 &cache->space_info);
3718 bit = block_group_state_bits(type);
3719 set_extent_bits(block_group_cache, chunk_offset,
3720 chunk_offset + size - 1,
3721 EXTENT_LOCKED, GFP_NOFS);
3722 set_state_private(block_group_cache, chunk_offset,
3723 (unsigned long)cache);
3724 set_extent_bits(block_group_cache, chunk_offset,
3725 chunk_offset + size - 1,
3726 bit | EXTENT_LOCKED, GFP_NOFS);
3728 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
3729 sizeof(cache->item));
3732 finish_current_insert(trans, extent_root);
3733 ret = del_pending_extents(trans, extent_root);
3735 set_avail_alloc_bits(extent_root->fs_info, type);