                struct extent_buffer *buf;
                buf = btrfs_find_tree_block(root, bytenr, num_bytes);
                if (buf) {
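+                       /* mutex_trylock() returns 1 on success, the
+                        * opposite of TestSetPageLocked(), so the
+                        * negation on the next test goes away
+                        */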
-                       if (!btrfs_try_tree_lock(buf) &&
+                       if (btrfs_try_tree_lock(buf) &&
                            btrfs_buffer_uptodate(buf, 0)) {
                                u64 transid =
                                    root->fs_info->running_transaction->transid;
                set_state_private(block_group_cache, found_key.objectid,
                                  (unsigned long)cache);
 
-               /* hack for now */
-               if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
-                       cache_block_group(root->fs_info->extent_root,
-                                         cache);
-               }
                if (key.objectid >=
                    btrfs_super_total_bytes(&info->super_copy))
                        break;
 
        eb = kmem_cache_zalloc(extent_buffer_cache, mask);
        eb->start = start;
        eb->len = len;
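+       /* each buffer now owns the mutex used for tree locking */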
+       mutex_init(&eb->mutex);
        spin_lock_irqsave(&leak_lock, flags);
        list_add(&eb->leak_list, &buffers);
        spin_unlock_irqrestore(&leak_lock, flags);
 
        for (i = 0; i < num_pages; i++) {
                page = extent_buffer_page(eb, i);
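+               /* the page lock here only guards page->private and the
+                * dirty tags; tree locking is handled by eb->mutex now
+                */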
+               lock_page(page);
                if (i == 0)
                        set_page_extent_head(page, eb->len);
                else
                        set_page_private(page, EXTENT_PAGE_PRIVATE);
 
                if (i == 0 || i == num_pages - 1) {
                        start = (u64)page->index << PAGE_CACHE_SHIFT;
                        end  = start + PAGE_CACHE_SIZE - 1;
                        if (test_range_bit(tree, start, end,
                                           EXTENT_DIRTY, 0)) {
+                               unlock_page(page);
                                continue;
                        }
                }
                clear_page_dirty_for_io(page);
                read_lock_irq(&page->mapping->tree_lock);
                if (!PageDirty(page)) {
                        radix_tree_tag_clear(&page->mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                }
                read_unlock_irq(&page->mapping->tree_lock);
+               unlock_page(page);
        }
        return 0;
 }
                 * on us if the page isn't already dirty.
                 */
                if (i == 0) {
+                       lock_page(page);
                        set_page_extent_head(page, eb->len);
                } else if (PagePrivate(page) &&
                           page->private != EXTENT_PAGE_PRIVATE) {
+                       lock_page(page);
                        set_page_extent_mapped(page);
+                       unlock_page(page);
                }
                __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
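+               /* keep the first page locked while we dirty the buffer,
+                * so releasepage can't drop page->private on us
+                */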
+               if (i == 0)
+                       unlock_page(page);
        }
        return set_extent_dirty(tree, eb->start,
                                eb->start + eb->len - 1, GFP_NOFS);
 
        int flags;
        struct list_head leak_list;
        struct rb_node rb_node;
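+       /* guards the btree block cached by this buffer */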
+       struct mutex mutex;
 };
 
 struct extent_map_tree;
 
 int btrfs_tree_lock(struct extent_buffer *eb)
 {
        int i;
 
-       if (!TestSetPageLocked(eb->first_page))
+       if (mutex_trylock(&eb->mutex))
                return 0;
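+       /*
+        * spin briefly before sleeping: tree locks are normally held
+        * for very short times, so the trylock loop usually wins
+        */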
        for (i = 0; i < 512; i++) {
                cpu_relax();
-               if (!TestSetPageLocked(eb->first_page))
+               if (mutex_trylock(&eb->mutex))
                        return 0;
        }
        cpu_relax();
-       lock_page(eb->first_page);
+       mutex_lock(&eb->mutex);
        return 0;
 }
 
 int btrfs_try_tree_lock(struct extent_buffer *eb)
 {
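+       /* nonzero on success, matching mutex_trylock() semantics */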
-       return TestSetPageLocked(eb->first_page);
+       return mutex_trylock(&eb->mutex);
 }
 
 int btrfs_tree_unlock(struct extent_buffer *eb)
 {
-       WARN_ON(!PageLocked(eb->first_page));
-       unlock_page(eb->first_page);
+       mutex_unlock(&eb->mutex);
        return 0;
 }
 
 int btrfs_tree_locked(struct extent_buffer *eb)
 {
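+       /* true if anyone holds the mutex, not necessarily this task */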
-       return PageLocked(eb->first_page);
+       return mutex_is_locked(&eb->mutex);
 }