2 * Copyright (C) 2007 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/smp_lock.h>
30 #include <linux/backing-dev.h>
31 #include <linux/mpage.h>
32 #include <linux/swap.h>
33 #include <linux/writeback.h>
34 #include <linux/statfs.h>
35 #include <linux/compat.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/xattr.h>
38 #include <linux/posix_acl.h>
39 #include <linux/falloc.h>
43 #include "transaction.h"
44 #include "btrfs_inode.h"
46 #include "print-tree.h"
48 #include "ordered-data.h"
51 #include "ref-cache.h"
52 #include "compression.h"
55 struct btrfs_iget_args {
57 struct btrfs_root *root;
60 static struct inode_operations btrfs_dir_inode_operations;
61 static struct inode_operations btrfs_symlink_inode_operations;
62 static struct inode_operations btrfs_dir_ro_inode_operations;
63 static struct inode_operations btrfs_special_inode_operations;
64 static struct inode_operations btrfs_file_inode_operations;
65 static struct address_space_operations btrfs_aops;
66 static struct address_space_operations btrfs_symlink_aops;
67 static struct file_operations btrfs_dir_file_operations;
68 static struct extent_io_ops btrfs_extent_io_ops;
70 static struct kmem_cache *btrfs_inode_cachep;
71 struct kmem_cache *btrfs_trans_handle_cachep;
72 struct kmem_cache *btrfs_transaction_cachep;
73 struct kmem_cache *btrfs_bit_radix_cachep;
74 struct kmem_cache *btrfs_path_cachep;
77 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
78 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
79 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
80 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
81 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
82 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
83 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
84 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
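/*
 * The table above is indexed by the S_IFMT bits of a VFS mode shifted down
 * by S_SHIFT, mapping each inode type to the matching directory entry type.
 * A minimal sketch of how it is typically consulted (hypothetical helper,
 * not part of this excerpt):
 *
 *	static inline u8 mode_to_btrfs_type(umode_t mode)
 *	{
 *		return btrfs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
 *	}
 */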
87 static void btrfs_truncate(struct inode *inode);
88 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
89 static noinline int cow_file_range(struct inode *inode,
90 struct page *locked_page,
91 u64 start, u64 end, int *page_started,
92 unsigned long *nr_written, int unlock);
94 static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
98 err = btrfs_init_acl(inode, dir);
100 err = btrfs_xattr_security_init(inode, dir);
105 * this does all the hard work for inserting an inline extent into
106 * the btree. The caller should have done a btrfs_drop_extents so that
107 * no overlapping inline items exist in the btree
109 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
110 struct btrfs_root *root, struct inode *inode,
111 u64 start, size_t size, size_t compressed_size,
112 struct page **compressed_pages)
114 struct btrfs_key key;
115 struct btrfs_path *path;
116 struct extent_buffer *leaf;
117 struct page *page = NULL;
120 struct btrfs_file_extent_item *ei;
123 size_t cur_size = size;
125 unsigned long offset;
126 int use_compress = 0;
128 if (compressed_size && compressed_pages) {
130 cur_size = compressed_size;
133 path = btrfs_alloc_path();
137 btrfs_set_trans_block_group(trans, inode);
139 key.objectid = inode->i_ino;
141 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
142 datasize = btrfs_file_extent_calc_inline_size(cur_size);
144 inode_add_bytes(inode, size);
145 ret = btrfs_insert_empty_item(trans, root, path, &key,
152 leaf = path->nodes[0];
153 ei = btrfs_item_ptr(leaf, path->slots[0],
154 struct btrfs_file_extent_item);
155 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
156 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
157 btrfs_set_file_extent_encryption(leaf, ei, 0);
158 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
159 btrfs_set_file_extent_ram_bytes(leaf, ei, size);
160 ptr = btrfs_file_extent_inline_start(ei);
165 while (compressed_size > 0) {
166 cpage = compressed_pages[i];
167 cur_size = min_t(unsigned long, compressed_size,
171 write_extent_buffer(leaf, kaddr, ptr, cur_size);
176 compressed_size -= cur_size;
178 btrfs_set_file_extent_compression(leaf, ei,
179 BTRFS_COMPRESS_ZLIB);
181 page = find_get_page(inode->i_mapping,
182 start >> PAGE_CACHE_SHIFT);
183 btrfs_set_file_extent_compression(leaf, ei, 0);
184 kaddr = kmap_atomic(page, KM_USER0);
185 offset = start & (PAGE_CACHE_SIZE - 1);
186 write_extent_buffer(leaf, kaddr + offset, ptr, size);
187 kunmap_atomic(kaddr, KM_USER0);
188 page_cache_release(page);
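/*
 * At this point the inline item body holds either the zlib-compressed
 * bytes (copied page by page in the loop above) or the raw contents of the
 * page cache page for this range; dirtying the leaf below makes the copy
 * part of the current transaction.
 */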
190 btrfs_mark_buffer_dirty(leaf);
191 btrfs_free_path(path);
193 BTRFS_I(inode)->disk_i_size = inode->i_size;
194 btrfs_update_inode(trans, root, inode);
197 btrfs_free_path(path);
203 * conditionally insert an inline extent into the file. This
204 * does the checks required to make sure the data is small enough
205 * to fit as an inline extent.
207 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
208 struct btrfs_root *root,
209 struct inode *inode, u64 start, u64 end,
210 size_t compressed_size,
211 struct page **compressed_pages)
213 u64 isize = i_size_read(inode);
214 u64 actual_end = min(end + 1, isize);
215 u64 inline_len = actual_end - start;
216 u64 aligned_end = (end + root->sectorsize - 1) &
217 ~((u64)root->sectorsize - 1);
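/*
 * aligned_end rounds end up to the next sectorsize boundary, e.g. with a
 * 4K sectorsize an end offset of 4999 gives an aligned_end of 8192, so the
 * extent drop below covers every block the inline data will replace.
 */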
219 u64 data_len = inline_len;
223 data_len = compressed_size;
226 actual_end >= PAGE_CACHE_SIZE ||
227 data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
229 (actual_end & (root->sectorsize - 1)) == 0) ||
231 data_len > root->fs_info->max_inline) {
235 ret = btrfs_drop_extents(trans, root, inode, start,
236 aligned_end, start, &hint_byte);
239 if (isize > actual_end)
240 inline_len = min_t(u64, isize, actual_end);
241 ret = insert_inline_extent(trans, root, inode, start,
242 inline_len, compressed_size,
245 btrfs_drop_extent_cache(inode, start, aligned_end, 0);
249 struct async_extent {
254 unsigned long nr_pages;
255 struct list_head list;
260 struct btrfs_root *root;
261 struct page *locked_page;
264 struct list_head extents;
265 struct btrfs_work work;
268 static noinline int add_async_extent(struct async_cow *cow,
269 u64 start, u64 ram_size,
272 unsigned long nr_pages)
274 struct async_extent *async_extent;
276 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
277 async_extent->start = start;
278 async_extent->ram_size = ram_size;
279 async_extent->compressed_size = compressed_size;
280 async_extent->pages = pages;
281 async_extent->nr_pages = nr_pages;
282 list_add_tail(&async_extent->list, &cow->extents);
287 * we create compressed extents in two phases. The first
288 * phase compresses a range of pages that have already been
289 * locked (both pages and state bits are locked).
291 * This is done inside an ordered work queue, and the compression
292 * is spread across many cpus. The actual IO submission is step
293 * two, and the ordered work queue takes care of making sure that
294 * happens in the same order things were put onto the queue by
295 * writepages and friends.
297 * If this code finds it can't get good compression, it puts an
298 * entry onto the work queue to write the uncompressed bytes. This
299 * makes sure that both compressed inodes and uncompressed inodes
300 * are written in the same order that pdflush sent them down.
302 static noinline int compress_file_range(struct inode *inode,
303 struct page *locked_page,
305 struct async_cow *async_cow,
308 struct btrfs_root *root = BTRFS_I(inode)->root;
309 struct btrfs_trans_handle *trans;
313 u64 blocksize = root->sectorsize;
315 u64 isize = i_size_read(inode);
317 struct page **pages = NULL;
318 unsigned long nr_pages;
319 unsigned long nr_pages_ret = 0;
320 unsigned long total_compressed = 0;
321 unsigned long total_in = 0;
322 unsigned long max_compressed = 128 * 1024;
323 unsigned long max_uncompressed = 128 * 1024;
329 actual_end = min_t(u64, isize, end + 1);
332 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
333 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
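/*
 * cap the work at 128K worth of pages (32 pages with 4K pages), matching
 * the max_uncompressed limit applied below to a single compressed extent.
 */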
336 * we don't want to send crud past the end of i_size through
337 * compression, that's just a waste of CPU time. So, if the
338 * end of the file is before the start of our current
339 * requested range of bytes, we bail out to the uncompressed
340 * cleanup code that can deal with all of this.
342 * It isn't really the fastest way to fix things, but this is a
343 * very uncommon corner.
345 if (actual_end <= start)
346 goto cleanup_and_bail_uncompressed;
348 total_compressed = actual_end - start;
350 /* we want to make sure that the amount of ram required to uncompress
351 * an extent is reasonable, so we limit the total size in ram
352 * of a compressed extent to 128k. This is a crucial number
353 * because it also controls how easily we can spread reads across
354 * cpus for decompression.
356 * We also want to make sure the amount of IO required to do
357 * a random read is reasonably small, so we limit the size of
358 * a compressed extent to 128k.
360 total_compressed = min(total_compressed, max_uncompressed);
361 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
362 num_bytes = max(blocksize, num_bytes);
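/*
 * num_bytes is the length of [start, end] rounded up to a block multiple,
 * e.g. start 0, end 5000 and a 4K blocksize give num_bytes 8192 (the 5001
 * byte range rounds up to two blocks).
 */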
363 disk_num_bytes = num_bytes;
368 * we do compression for mount -o compress and when the
369 * inode has not been flagged as nocompress. This flag can
370 * change at any time if we discover bad compression ratios.
372 if (!btrfs_test_flag(inode, NOCOMPRESS) &&
373 btrfs_test_opt(root, COMPRESS)) {
375 pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
377 ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
378 total_compressed, pages,
379 nr_pages, &nr_pages_ret,
385 unsigned long offset = total_compressed &
386 (PAGE_CACHE_SIZE - 1);
387 struct page *page = pages[nr_pages_ret - 1];
390 /* zero the tail end of the last page, we might be
391 * sending it down to disk
394 kaddr = kmap_atomic(page, KM_USER0);
395 memset(kaddr + offset, 0,
396 PAGE_CACHE_SIZE - offset);
397 kunmap_atomic(kaddr, KM_USER0);
403 trans = btrfs_join_transaction(root, 1);
405 btrfs_set_trans_block_group(trans, inode);
407 /* lets try to make an inline extent */
408 if (ret || total_in < (actual_end - start)) {
409 /* we didn't compress the entire range, try
410 * to make an uncompressed inline extent.
412 ret = cow_file_range_inline(trans, root, inode,
413 start, end, 0, NULL);
415 /* try making a compressed inline extent */
416 ret = cow_file_range_inline(trans, root, inode,
418 total_compressed, pages);
420 btrfs_end_transaction(trans, root);
423 * inline extent creation worked, we don't need
424 * to create any more async work items. Unlock
425 * and free up our temp pages.
427 extent_clear_unlock_delalloc(inode,
428 &BTRFS_I(inode)->io_tree,
429 start, end, NULL, 1, 0,
438 * we aren't doing an inline extent, so round the compressed size
439 * up to a block size boundary so the allocator does sane
442 total_compressed = (total_compressed + blocksize - 1) &
446 * one last check to make sure the compression is really a
447 * win, compare the page count read with the blocks on disk
449 total_in = (total_in + PAGE_CACHE_SIZE - 1) &
450 ~(PAGE_CACHE_SIZE - 1);
451 if (total_compressed >= total_in) {
454 disk_num_bytes = total_compressed;
455 num_bytes = total_in;
458 if (!will_compress && pages) {
460 * the compression code ran but failed to make things smaller,
461 * free any pages it allocated and our page pointer array
463 for (i = 0; i < nr_pages_ret; i++) {
464 WARN_ON(pages[i]->mapping);
465 page_cache_release(pages[i]);
469 total_compressed = 0;
472 /* flag the file so we don't compress in the future */
473 btrfs_set_flag(inode, NOCOMPRESS);
478 /* the async work queues will take care of doing actual
479 * allocation on disk for these compressed pages,
480 * and will submit them to the elevator.
482 add_async_extent(async_cow, start, num_bytes,
483 total_compressed, pages, nr_pages_ret);
485 if (start + num_bytes < end && start + num_bytes < actual_end) {
492 cleanup_and_bail_uncompressed:
494 * No compression, but we still need to write the pages in
495 * the file we've been given so far. Redirty the locked
496 * page if it corresponds to our extent and set things up
497 * for the async work queue to run cow_file_range to do
498 * the normal delalloc dance
500 if (page_offset(locked_page) >= start &&
501 page_offset(locked_page) <= end) {
502 __set_page_dirty_nobuffers(locked_page);
503 /* unlocked later on in the async handlers */
505 add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
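/*
 * an async extent queued with no pages and a compressed_size of 0 is the
 * "not compressed" marker; submit_compressed_extents spots the NULL pages
 * pointer and falls back to plain cow_file_range for this range.
 */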
513 for (i = 0; i < nr_pages_ret; i++) {
514 WARN_ON(pages[i]->mapping);
515 page_cache_release(pages[i]);
523 * phase two of compressed writeback. This is the ordered portion
524 * of the code, which only gets called in the order the work was
525 * queued. We walk all the async extents created by compress_file_range
526 * and send them down to the disk.
528 static noinline int submit_compressed_extents(struct inode *inode,
529 struct async_cow *async_cow)
531 struct async_extent *async_extent;
533 struct btrfs_trans_handle *trans;
534 struct btrfs_key ins;
535 struct extent_map *em;
536 struct btrfs_root *root = BTRFS_I(inode)->root;
537 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
538 struct extent_io_tree *io_tree;
541 if (list_empty(&async_cow->extents))
544 trans = btrfs_join_transaction(root, 1);
546 while (!list_empty(&async_cow->extents)) {
547 async_extent = list_entry(async_cow->extents.next,
548 struct async_extent, list);
549 list_del(&async_extent->list);
551 io_tree = &BTRFS_I(inode)->io_tree;
553 /* did the compression code fall back to uncompressed IO? */
554 if (!async_extent->pages) {
555 int page_started = 0;
556 unsigned long nr_written = 0;
558 lock_extent(io_tree, async_extent->start,
559 async_extent->start +
560 async_extent->ram_size - 1, GFP_NOFS);
562 /* allocate blocks */
563 cow_file_range(inode, async_cow->locked_page,
565 async_extent->start +
566 async_extent->ram_size - 1,
567 &page_started, &nr_written, 0);
570 * if page_started, cow_file_range inserted an
571 * inline extent and took care of all the unlocking
572 * and IO for us. Otherwise, we need to submit
573 * all those pages down to the drive.
576 extent_write_locked_range(io_tree,
577 inode, async_extent->start,
578 async_extent->start +
579 async_extent->ram_size - 1,
587 lock_extent(io_tree, async_extent->start,
588 async_extent->start + async_extent->ram_size - 1,
591 * here we're doing allocation and writeback of the
594 btrfs_drop_extent_cache(inode, async_extent->start,
595 async_extent->start +
596 async_extent->ram_size - 1, 0);
598 ret = btrfs_reserve_extent(trans, root,
599 async_extent->compressed_size,
600 async_extent->compressed_size,
604 em = alloc_extent_map(GFP_NOFS);
605 em->start = async_extent->start;
606 em->len = async_extent->ram_size;
607 em->orig_start = em->start;
609 em->block_start = ins.objectid;
610 em->block_len = ins.offset;
611 em->bdev = root->fs_info->fs_devices->latest_bdev;
612 set_bit(EXTENT_FLAG_PINNED, &em->flags);
613 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
616 spin_lock(&em_tree->lock);
617 ret = add_extent_mapping(em_tree, em);
618 spin_unlock(&em_tree->lock);
619 if (ret != -EEXIST) {
623 btrfs_drop_extent_cache(inode, async_extent->start,
624 async_extent->start +
625 async_extent->ram_size - 1, 0);
628 ret = btrfs_add_ordered_extent(inode, async_extent->start,
630 async_extent->ram_size,
632 BTRFS_ORDERED_COMPRESSED);
635 btrfs_end_transaction(trans, root);
638 * clear dirty, set writeback and unlock the pages.
640 extent_clear_unlock_delalloc(inode,
641 &BTRFS_I(inode)->io_tree,
643 async_extent->start +
644 async_extent->ram_size - 1,
645 NULL, 1, 1, 0, 1, 1, 0);
647 ret = btrfs_submit_compressed_write(inode,
649 async_extent->ram_size,
651 ins.offset, async_extent->pages,
652 async_extent->nr_pages);
655 trans = btrfs_join_transaction(root, 1);
656 alloc_hint = ins.objectid + ins.offset;
661 btrfs_end_transaction(trans, root);
666 * when extent_io.c finds a delayed allocation range in the file,
667 * the call backs end up in this code. The basic idea is to
668 * allocate extents on disk for the range, and create ordered data structs
669 * in ram to track those extents.
671 * locked_page is the page that writepage had locked already. We use
672 * it to make sure we don't do extra locks or unlocks.
674 * *page_started is set to one if we unlock locked_page and do everything
675 * required to start IO on it. It may be clean and already done with IO when we return.
678 static noinline int cow_file_range(struct inode *inode,
679 struct page *locked_page,
680 u64 start, u64 end, int *page_started,
681 unsigned long *nr_written,
684 struct btrfs_root *root = BTRFS_I(inode)->root;
685 struct btrfs_trans_handle *trans;
688 unsigned long ram_size;
691 u64 blocksize = root->sectorsize;
693 u64 isize = i_size_read(inode);
694 struct btrfs_key ins;
695 struct extent_map *em;
696 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
699 trans = btrfs_join_transaction(root, 1);
701 btrfs_set_trans_block_group(trans, inode);
703 actual_end = min_t(u64, isize, end + 1);
705 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
706 num_bytes = max(blocksize, num_bytes);
707 disk_num_bytes = num_bytes;
711 /* lets try to make an inline extent */
712 ret = cow_file_range_inline(trans, root, inode,
713 start, end, 0, NULL);
715 extent_clear_unlock_delalloc(inode,
716 &BTRFS_I(inode)->io_tree,
717 start, end, NULL, 1, 1,
719 *nr_written = *nr_written +
720 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
727 BUG_ON(disk_num_bytes >
728 btrfs_super_total_bytes(&root->fs_info->super_copy));
730 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
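/*
 * carve the delalloc range into extents of at most max_extent bytes; each
 * pass through the loop reserves space, records an extent map and an
 * ordered extent, and advances alloc_hint so the next reservation is
 * placed near this one.
 */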
732 while (disk_num_bytes > 0) {
733 cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
734 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
735 root->sectorsize, 0, alloc_hint,
739 em = alloc_extent_map(GFP_NOFS);
741 em->orig_start = em->start;
743 ram_size = ins.offset;
744 em->len = ins.offset;
746 em->block_start = ins.objectid;
747 em->block_len = ins.offset;
748 em->bdev = root->fs_info->fs_devices->latest_bdev;
749 set_bit(EXTENT_FLAG_PINNED, &em->flags);
752 spin_lock(&em_tree->lock);
753 ret = add_extent_mapping(em_tree, em);
754 spin_unlock(&em_tree->lock);
755 if (ret != -EEXIST) {
759 btrfs_drop_extent_cache(inode, start,
760 start + ram_size - 1, 0);
763 cur_alloc_size = ins.offset;
764 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
765 ram_size, cur_alloc_size, 0);
768 if (root->root_key.objectid ==
769 BTRFS_DATA_RELOC_TREE_OBJECTID) {
770 ret = btrfs_reloc_clone_csums(inode, start,
775 if (disk_num_bytes < cur_alloc_size)
778 /* we're not doing compressed IO, don't unlock the first
779 * page (which the caller expects to stay locked), don't
780 * clear any dirty bits and don't set any writeback bits
782 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
783 start, start + ram_size - 1,
784 locked_page, unlock, 1,
786 disk_num_bytes -= cur_alloc_size;
787 num_bytes -= cur_alloc_size;
788 alloc_hint = ins.objectid + ins.offset;
789 start += cur_alloc_size;
793 btrfs_end_transaction(trans, root);
799 * work queue call back to start compression on a file and pages
801 static noinline void async_cow_start(struct btrfs_work *work)
803 struct async_cow *async_cow;
805 async_cow = container_of(work, struct async_cow, work);
807 compress_file_range(async_cow->inode, async_cow->locked_page,
808 async_cow->start, async_cow->end, async_cow,
811 async_cow->inode = NULL;
815 * work queue call back to submit previously compressed pages
817 static noinline void async_cow_submit(struct btrfs_work *work)
819 struct async_cow *async_cow;
820 struct btrfs_root *root;
821 unsigned long nr_pages;
823 async_cow = container_of(work, struct async_cow, work);
825 root = async_cow->root;
826 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
829 atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
831 if (atomic_read(&root->fs_info->async_delalloc_pages) <
833 waitqueue_active(&root->fs_info->async_submit_wait))
834 wake_up(&root->fs_info->async_submit_wait);
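/*
 * writers queuing more delalloc throttle themselves on async_submit_wait
 * once async_delalloc_pages climbs too high; dropping our page count here
 * is what lets them continue.
 */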
836 if (async_cow->inode)
837 submit_compressed_extents(async_cow->inode, async_cow);
840 static noinline void async_cow_free(struct btrfs_work *work)
842 struct async_cow *async_cow;
843 async_cow = container_of(work, struct async_cow, work);
847 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
848 u64 start, u64 end, int *page_started,
849 unsigned long *nr_written)
851 struct async_cow *async_cow;
852 struct btrfs_root *root = BTRFS_I(inode)->root;
853 unsigned long nr_pages;
855 int limit = 10 * 1024 * 1024;
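/*
 * threshold (in pages) of outstanding async delalloc work; writers queuing
 * more than this wait below for the submit side to catch up.
 */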
857 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
858 EXTENT_DELALLOC, 1, 0, GFP_NOFS);
859 while (start < end) {
860 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
861 async_cow->inode = inode;
862 async_cow->root = root;
863 async_cow->locked_page = locked_page;
864 async_cow->start = start;
866 if (btrfs_test_flag(inode, NOCOMPRESS))
869 cur_end = min(end, start + 512 * 1024 - 1);
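/*
 * each async_cow work item covers at most 512K of the range so compression
 * can be spread across several worker threads; the NOCOMPRESS case above is
 * expected to take the whole range in a single item.
 */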
871 async_cow->end = cur_end;
872 INIT_LIST_HEAD(&async_cow->extents);
874 async_cow->work.func = async_cow_start;
875 async_cow->work.ordered_func = async_cow_submit;
876 async_cow->work.ordered_free = async_cow_free;
877 async_cow->work.flags = 0;
879 nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
881 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
883 btrfs_queue_worker(&root->fs_info->delalloc_workers,
886 if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
887 wait_event(root->fs_info->async_submit_wait,
888 (atomic_read(&root->fs_info->async_delalloc_pages) <
892 while (atomic_read(&root->fs_info->async_submit_draining) &&
893 atomic_read(&root->fs_info->async_delalloc_pages)) {
894 wait_event(root->fs_info->async_submit_wait,
895 (atomic_read(&root->fs_info->async_delalloc_pages) ==
899 *nr_written += nr_pages;
906 static noinline int csum_exist_in_range(struct btrfs_root *root,
907 u64 bytenr, u64 num_bytes)
910 struct btrfs_ordered_sum *sums;
913 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
914 bytenr + num_bytes - 1, &list);
915 if (ret == 0 && list_empty(&list))
918 while (!list_empty(&list)) {
919 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
920 list_del(&sums->list);
927 * called for nocow writeback. This checks for snapshots or COW copies
928 * of the extents that exist in the file, and COWs the file as required.
930 * If no cow copies or snapshots exist, we write directly to the existing
933 static noinline int run_delalloc_nocow(struct inode *inode,
934 struct page *locked_page,
935 u64 start, u64 end, int *page_started, int force,
936 unsigned long *nr_written)
938 struct btrfs_root *root = BTRFS_I(inode)->root;
939 struct btrfs_trans_handle *trans;
940 struct extent_buffer *leaf;
941 struct btrfs_path *path;
942 struct btrfs_file_extent_item *fi;
943 struct btrfs_key found_key;
955 path = btrfs_alloc_path();
957 trans = btrfs_join_transaction(root, 1);
963 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
966 if (ret > 0 && path->slots[0] > 0 && check_prev) {
967 leaf = path->nodes[0];
968 btrfs_item_key_to_cpu(leaf, &found_key,
970 if (found_key.objectid == inode->i_ino &&
971 found_key.type == BTRFS_EXTENT_DATA_KEY)
976 leaf = path->nodes[0];
977 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
978 ret = btrfs_next_leaf(root, path);
983 leaf = path->nodes[0];
989 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
991 if (found_key.objectid > inode->i_ino ||
992 found_key.type > BTRFS_EXTENT_DATA_KEY ||
993 found_key.offset > end)
996 if (found_key.offset > cur_offset) {
997 extent_end = found_key.offset;
1001 fi = btrfs_item_ptr(leaf, path->slots[0],
1002 struct btrfs_file_extent_item);
1003 extent_type = btrfs_file_extent_type(leaf, fi);
1005 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1006 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1007 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1008 extent_end = found_key.offset +
1009 btrfs_file_extent_num_bytes(leaf, fi);
1010 if (extent_end <= start) {
1014 if (disk_bytenr == 0)
1016 if (btrfs_file_extent_compression(leaf, fi) ||
1017 btrfs_file_extent_encryption(leaf, fi) ||
1018 btrfs_file_extent_other_encoding(leaf, fi))
1020 if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1022 if (btrfs_extent_readonly(root, disk_bytenr))
1024 if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
1027 disk_bytenr += btrfs_file_extent_offset(leaf, fi);
1028 disk_bytenr += cur_offset - found_key.offset;
1029 num_bytes = min(end + 1, extent_end) - cur_offset;
1031 * force cow if csum exists in the range.
1032 * this ensures that csums for a given extent are
1033 * either valid or do not exist.
1035 if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1038 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1039 extent_end = found_key.offset +
1040 btrfs_file_extent_inline_len(leaf, fi);
1041 extent_end = ALIGN(extent_end, root->sectorsize);
1046 if (extent_end <= start) {
1051 if (cow_start == (u64)-1)
1052 cow_start = cur_offset;
1053 cur_offset = extent_end;
1054 if (cur_offset > end)
1060 btrfs_release_path(root, path);
1061 if (cow_start != (u64)-1) {
1062 ret = cow_file_range(inode, locked_page, cow_start,
1063 found_key.offset - 1, page_started,
1066 cow_start = (u64)-1;
1069 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1070 struct extent_map *em;
1071 struct extent_map_tree *em_tree;
1072 em_tree = &BTRFS_I(inode)->extent_tree;
1073 em = alloc_extent_map(GFP_NOFS);
1074 em->start = cur_offset;
1075 em->orig_start = em->start;
1076 em->len = num_bytes;
1077 em->block_len = num_bytes;
1078 em->block_start = disk_bytenr;
1079 em->bdev = root->fs_info->fs_devices->latest_bdev;
1080 set_bit(EXTENT_FLAG_PINNED, &em->flags);
1082 spin_lock(&em_tree->lock);
1083 ret = add_extent_mapping(em_tree, em);
1084 spin_unlock(&em_tree->lock);
1085 if (ret != -EEXIST) {
1086 free_extent_map(em);
1089 btrfs_drop_extent_cache(inode, em->start,
1090 em->start + em->len - 1, 0);
1092 type = BTRFS_ORDERED_PREALLOC;
1094 type = BTRFS_ORDERED_NOCOW;
1097 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1098 num_bytes, num_bytes, type);
1101 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1102 cur_offset, cur_offset + num_bytes - 1,
1103 locked_page, 1, 1, 1, 0, 0, 0);
1104 cur_offset = extent_end;
1105 if (cur_offset > end)
1108 btrfs_release_path(root, path);
1110 if (cur_offset <= end && cow_start == (u64)-1)
1111 cow_start = cur_offset;
1112 if (cow_start != (u64)-1) {
1113 ret = cow_file_range(inode, locked_page, cow_start, end,
1114 page_started, nr_written, 1);
1118 ret = btrfs_end_transaction(trans, root);
1120 btrfs_free_path(path);
1125 * extent_io.c call back to do delayed allocation processing
1127 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1128 u64 start, u64 end, int *page_started,
1129 unsigned long *nr_written)
1132 struct btrfs_root *root = BTRFS_I(inode)->root;
1134 if (btrfs_test_flag(inode, NODATACOW))
1135 ret = run_delalloc_nocow(inode, locked_page, start, end,
1136 page_started, 1, nr_written);
1137 else if (btrfs_test_flag(inode, PREALLOC))
1138 ret = run_delalloc_nocow(inode, locked_page, start, end,
1139 page_started, 0, nr_written);
1140 else if (!btrfs_test_opt(root, COMPRESS))
1141 ret = cow_file_range(inode, locked_page, start, end,
1142 page_started, nr_written, 1);
1144 ret = cow_file_range_async(inode, locked_page, start, end,
1145 page_started, nr_written);
1150 * extent_io.c set_bit_hook, used to track delayed allocation
1151 * bytes in this file, and to maintain the list of inodes that
1152 * have pending delalloc work to be done.
1154 static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1155 unsigned long old, unsigned long bits)
1158 * set_bit and clear bit hooks normally require _irqsave/restore
1159 * but in this case, we are only testing for the DELALLOC
1160 * bit, which is only set or cleared with irqs on
1162 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1163 struct btrfs_root *root = BTRFS_I(inode)->root;
1164 btrfs_delalloc_reserve_space(root, inode, end - start + 1);
1165 spin_lock(&root->fs_info->delalloc_lock);
1166 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
1167 root->fs_info->delalloc_bytes += end - start + 1;
1168 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1169 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1170 &root->fs_info->delalloc_inodes);
1172 spin_unlock(&root->fs_info->delalloc_lock);
1178 * extent_io.c clear_bit_hook, see set_bit_hook for why
1180 static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
1181 unsigned long old, unsigned long bits)
1184 * set_bit and clear bit hooks normally require _irqsave/restore
1185 * but in this case, we are only testing for the DELALLOC
1186 * bit, which is only set or cleared with irqs on
1188 if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1189 struct btrfs_root *root = BTRFS_I(inode)->root;
1191 spin_lock(&root->fs_info->delalloc_lock);
1192 if (end - start + 1 > root->fs_info->delalloc_bytes) {
1193 printk(KERN_INFO "btrfs warning: delalloc account "
1195 (unsigned long long)end - start + 1,
1196 (unsigned long long)
1197 root->fs_info->delalloc_bytes);
1198 btrfs_delalloc_free_space(root, inode, (u64)-1);
1199 root->fs_info->delalloc_bytes = 0;
1200 BTRFS_I(inode)->delalloc_bytes = 0;
1202 btrfs_delalloc_free_space(root, inode,
1204 root->fs_info->delalloc_bytes -= end - start + 1;
1205 BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
1207 if (BTRFS_I(inode)->delalloc_bytes == 0 &&
1208 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1209 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1211 spin_unlock(&root->fs_info->delalloc_lock);
1217 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1218 * we don't create bios that span stripes or chunks
1220 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1221 size_t size, struct bio *bio,
1222 unsigned long bio_flags)
1224 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1225 struct btrfs_mapping_tree *map_tree;
1226 u64 logical = (u64)bio->bi_sector << 9;
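/* bi_sector counts 512 byte sectors, so << 9 converts it to a byte offset */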
1231 if (bio_flags & EXTENT_BIO_COMPRESSED)
1234 length = bio->bi_size;
1235 map_tree = &root->fs_info->mapping_tree;
1236 map_length = length;
1237 ret = btrfs_map_block(map_tree, READ, logical,
1238 &map_length, NULL, 0);
1240 if (map_length < length + size)
1246 * in order to insert checksums into the metadata in large chunks,
1247 * we wait until bio submission time. All the pages in the bio are
1248 * checksummed and sums are attached onto the ordered extent record.
1250 * At IO completion time the csums attached to the ordered extent record
1251 * are inserted into the btree
1253 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1254 struct bio *bio, int mirror_num,
1255 unsigned long bio_flags)
1257 struct btrfs_root *root = BTRFS_I(inode)->root;
1260 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1266 * in order to insert checksums into the metadata in large chunks,
1267 * we wait until bio submission time. All the pages in the bio are
1268 * checksummed and sums are attached onto the ordered extent record.
1270 * At IO completion time the csums attached to the ordered extent record
1271 * are inserted into the btree
1273 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1274 int mirror_num, unsigned long bio_flags)
1276 struct btrfs_root *root = BTRFS_I(inode)->root;
1277 return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1281 * extent_io.c submission hook. This does the right thing for csum calculation
1282 * on write, or reading the csums from the tree before a read
1284 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1285 int mirror_num, unsigned long bio_flags)
1287 struct btrfs_root *root = BTRFS_I(inode)->root;
1291 skip_sum = btrfs_test_flag(inode, NODATASUM);
1293 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1296 if (!(rw & (1 << BIO_RW))) {
1297 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1298 return btrfs_submit_compressed_read(inode, bio,
1299 mirror_num, bio_flags);
1300 } else if (!skip_sum)
1301 btrfs_lookup_bio_sums(root, inode, bio, NULL);
1303 } else if (!skip_sum) {
1304 /* csum items have already been cloned */
1305 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1307 /* we're doing a write, do the async checksumming */
1308 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1309 inode, rw, bio, mirror_num,
1310 bio_flags, __btrfs_submit_bio_start,
1311 __btrfs_submit_bio_done);
1315 return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1319 * given a list of ordered sums record them in the inode. This happens
1320 * at IO completion time based on sums calculated at bio submission time.
1322 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1323 struct inode *inode, u64 file_offset,
1324 struct list_head *list)
1326 struct btrfs_ordered_sum *sum;
1328 btrfs_set_trans_block_group(trans, inode);
1330 list_for_each_entry(sum, list, list) {
1331 btrfs_csum_file_blocks(trans,
1332 BTRFS_I(inode)->root->fs_info->csum_root, sum);
1337 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
1339 if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1341 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1345 /* see btrfs_writepage_start_hook for details on why this is required */
1346 struct btrfs_writepage_fixup {
1348 struct btrfs_work work;
1351 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1353 struct btrfs_writepage_fixup *fixup;
1354 struct btrfs_ordered_extent *ordered;
1356 struct inode *inode;
1360 fixup = container_of(work, struct btrfs_writepage_fixup, work);
1364 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1365 ClearPageChecked(page);
1369 inode = page->mapping->host;
1370 page_start = page_offset(page);
1371 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1373 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1375 /* already ordered? We're done */
1376 if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
1377 EXTENT_ORDERED, 0)) {
1381 ordered = btrfs_lookup_ordered_extent(inode, page_start);
1383 unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
1384 page_end, GFP_NOFS);
1386 btrfs_start_ordered_extent(inode, ordered, 1);
1390 btrfs_set_extent_delalloc(inode, page_start, page_end);
1391 ClearPageChecked(page);
1393 unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1396 page_cache_release(page);
1400 * There are a few paths in the higher layers of the kernel that directly
1401 * set the page dirty bit without asking the filesystem if it is a
1402 * good idea. This causes problems because we want to make sure COW
1403 * properly happens and the data=ordered rules are followed.
1405 * In our case any range that doesn't have the ORDERED bit set
1406 * hasn't been properly setup for IO. We kick off an async process
1407 * to fix it up. The async helper will wait for ordered extents, set
1408 * the delalloc bit and make it safe to write the page.
1410 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1412 struct inode *inode = page->mapping->host;
1413 struct btrfs_writepage_fixup *fixup;
1414 struct btrfs_root *root = BTRFS_I(inode)->root;
1417 ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1422 if (PageChecked(page))
1425 fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1429 SetPageChecked(page);
1430 page_cache_get(page);
1431 fixup->work.func = btrfs_writepage_fixup_worker;
1433 btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1437 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1438 struct inode *inode, u64 file_pos,
1439 u64 disk_bytenr, u64 disk_num_bytes,
1440 u64 num_bytes, u64 ram_bytes,
1441 u8 compression, u8 encryption,
1442 u16 other_encoding, int extent_type)
1444 struct btrfs_root *root = BTRFS_I(inode)->root;
1445 struct btrfs_file_extent_item *fi;
1446 struct btrfs_path *path;
1447 struct extent_buffer *leaf;
1448 struct btrfs_key ins;
1452 path = btrfs_alloc_path();
1455 ret = btrfs_drop_extents(trans, root, inode, file_pos,
1456 file_pos + num_bytes, file_pos, &hint);
1459 ins.objectid = inode->i_ino;
1460 ins.offset = file_pos;
1461 ins.type = BTRFS_EXTENT_DATA_KEY;
1462 ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1464 leaf = path->nodes[0];
1465 fi = btrfs_item_ptr(leaf, path->slots[0],
1466 struct btrfs_file_extent_item);
1467 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1468 btrfs_set_file_extent_type(leaf, fi, extent_type);
1469 btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1470 btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1471 btrfs_set_file_extent_offset(leaf, fi, 0);
1472 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1473 btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1474 btrfs_set_file_extent_compression(leaf, fi, compression);
1475 btrfs_set_file_extent_encryption(leaf, fi, encryption);
1476 btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1477 btrfs_mark_buffer_dirty(leaf);
1479 inode_add_bytes(inode, num_bytes);
1480 btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);
1482 ins.objectid = disk_bytenr;
1483 ins.offset = disk_num_bytes;
1484 ins.type = BTRFS_EXTENT_ITEM_KEY;
1485 ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
1486 root->root_key.objectid,
1487 trans->transid, inode->i_ino, &ins);
1490 btrfs_free_path(path);
1494 /* as ordered data IO finishes, this gets called so we can finish
1495 * an ordered extent if the range of bytes in the file it covers are fully written.
1498 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1500 struct btrfs_root *root = BTRFS_I(inode)->root;
1501 struct btrfs_trans_handle *trans;
1502 struct btrfs_ordered_extent *ordered_extent;
1503 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1504 struct btrfs_path *path;
1508 ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
1513 * before we join the transaction, try to do some of our IO.
1514 * This will limit the amount of IO that we have to do with
1515 * the transaction running. We're unlikely to need to do any
1516 * IO if the file extents are new, the disk_i_size check
1517 * covers the most common case.
1519 if (start < BTRFS_I(inode)->disk_i_size) {
1520 path = btrfs_alloc_path();
1522 ret = btrfs_lookup_file_extent(NULL, root, path,
1525 btrfs_free_path(path);
1529 trans = btrfs_join_transaction(root, 1);
1531 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1532 BUG_ON(!ordered_extent);
1533 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
1536 lock_extent(io_tree, ordered_extent->file_offset,
1537 ordered_extent->file_offset + ordered_extent->len - 1,
1540 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1542 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1544 ret = btrfs_mark_extent_written(trans, root, inode,
1545 ordered_extent->file_offset,
1546 ordered_extent->file_offset +
1547 ordered_extent->len);
1550 ret = insert_reserved_file_extent(trans, inode,
1551 ordered_extent->file_offset,
1552 ordered_extent->start,
1553 ordered_extent->disk_len,
1554 ordered_extent->len,
1555 ordered_extent->len,
1557 BTRFS_FILE_EXTENT_REG);
1560 unlock_extent(io_tree, ordered_extent->file_offset,
1561 ordered_extent->file_offset + ordered_extent->len - 1,
1564 add_pending_csums(trans, inode, ordered_extent->file_offset,
1565 &ordered_extent->list);
1567 mutex_lock(&BTRFS_I(inode)->extent_mutex);
1568 btrfs_ordered_update_i_size(inode, ordered_extent);
1569 btrfs_update_inode(trans, root, inode);
1570 btrfs_remove_ordered_extent(inode, ordered_extent);
1571 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
1574 btrfs_put_ordered_extent(ordered_extent);
1575 /* once for the tree */
1576 btrfs_put_ordered_extent(ordered_extent);
1578 btrfs_end_transaction(trans, root);
1582 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1583 struct extent_state *state, int uptodate)
1585 return btrfs_finish_ordered_io(page->mapping->host, start, end);
1589 * When IO fails, either with EIO or csum verification fails, we
1590 * try other mirrors that might have a good copy of the data. This
1591 * io_failure_record is used to record state as we go through all the
1592 * mirrors. If another mirror has good data, the page is set up to date
1593 * and things continue. If a good mirror can't be found, the original
1594 * bio end_io callback is called to indicate things have failed.
1596 struct io_failure_record {
1601 unsigned long bio_flags;
1605 static int btrfs_io_failed_hook(struct bio *failed_bio,
1606 struct page *page, u64 start, u64 end,
1607 struct extent_state *state)
1609 struct io_failure_record *failrec = NULL;
1611 struct extent_map *em;
1612 struct inode *inode = page->mapping->host;
1613 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1614 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1621 ret = get_state_private(failure_tree, start, &private);
1623 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1626 failrec->start = start;
1627 failrec->len = end - start + 1;
1628 failrec->last_mirror = 0;
1629 failrec->bio_flags = 0;
1631 spin_lock(&em_tree->lock);
1632 em = lookup_extent_mapping(em_tree, start, failrec->len);
1633 if (em->start > start || em->start + em->len < start) {
1634 free_extent_map(em);
1637 spin_unlock(&em_tree->lock);
1639 if (!em || IS_ERR(em)) {
1643 logical = start - em->start;
1644 logical = em->block_start + logical;
1645 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1646 logical = em->block_start;
1647 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
1649 failrec->logical = logical;
1650 free_extent_map(em);
1651 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1652 EXTENT_DIRTY, GFP_NOFS);
1653 set_state_private(failure_tree, start,
1654 (u64)(unsigned long)failrec);
1656 failrec = (struct io_failure_record *)(unsigned long)private;
1658 num_copies = btrfs_num_copies(
1659 &BTRFS_I(inode)->root->fs_info->mapping_tree,
1660 failrec->logical, failrec->len);
1661 failrec->last_mirror++;
1663 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1664 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1667 if (state && state->start != failrec->start)
1669 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1671 if (!state || failrec->last_mirror > num_copies) {
1672 set_state_private(failure_tree, failrec->start, 0);
1673 clear_extent_bits(failure_tree, failrec->start,
1674 failrec->start + failrec->len - 1,
1675 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1679 bio = bio_alloc(GFP_NOFS, 1);
1680 bio->bi_private = state;
1681 bio->bi_end_io = failed_bio->bi_end_io;
1682 bio->bi_sector = failrec->logical >> 9;
1683 bio->bi_bdev = failed_bio->bi_bdev;
1686 bio_add_page(bio, page, failrec->len, start - page_offset(page));
1687 if (failed_bio->bi_rw & (1 << BIO_RW))
1692 BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1693 failrec->last_mirror,
1694 failrec->bio_flags);
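/*
 * the rebuilt single-page bio goes back through the normal submission hook
 * aimed at the next mirror; a good copy marks the page uptodate, otherwise
 * we end up back here until last_mirror passes num_copies and we give up.
 */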
1699 * each time an IO finishes, we do a fast check in the IO failure tree
1700 * to see if we need to process or clean up an io_failure_record
1702 static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1705 u64 private_failure;
1706 struct io_failure_record *failure;
1710 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1711 (u64)-1, 1, EXTENT_DIRTY)) {
1712 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1713 start, &private_failure);
1715 failure = (struct io_failure_record *)(unsigned long)
1717 set_state_private(&BTRFS_I(inode)->io_failure_tree,
1719 clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1721 failure->start + failure->len - 1,
1722 EXTENT_DIRTY | EXTENT_LOCKED,
1731 * when reads are done, we need to check csums to verify the data is correct.
1732 * If there's a match, we allow the bio to finish. If not, we go through
1733 * the io_failure_record routines to find good copies
1735 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1736 struct extent_state *state)
1738 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1739 struct inode *inode = page->mapping->host;
1740 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1742 u64 private = ~(u32)0;
1744 struct btrfs_root *root = BTRFS_I(inode)->root;
1747 if (PageChecked(page)) {
1748 ClearPageChecked(page);
1751 if (btrfs_test_flag(inode, NODATASUM))
1754 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1755 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
1756 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1761 if (state && state->start == start) {
1762 private = state->private;
1765 ret = get_state_private(io_tree, start, &private);
1767 kaddr = kmap_atomic(page, KM_USER0);
1771 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
1772 btrfs_csum_final(csum, (char *)&csum);
1773 if (csum != private)
1776 kunmap_atomic(kaddr, KM_USER0);
1778 /* if the io failure tree for this inode is non-empty,
1779 * check to see if we've recovered from a failed IO
1781 btrfs_clean_io_failures(inode, start);
1785 printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
1786 "private %llu\n", page->mapping->host->i_ino,
1787 (unsigned long long)start, csum,
1788 (unsigned long long)private);
1789 memset(kaddr + offset, 1, end - start + 1);
1790 flush_dcache_page(page);
1791 kunmap_atomic(kaddr, KM_USER0);
1798 * This creates an orphan entry for the given inode in case something goes
1799 * wrong in the middle of an unlink/truncate.
1801 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
1803 struct btrfs_root *root = BTRFS_I(inode)->root;
1806 spin_lock(&root->list_lock);
1808 /* already on the orphan list, we're good */
1809 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
1810 spin_unlock(&root->list_lock);
1814 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1816 spin_unlock(&root->list_lock);
1819 * insert an orphan item to track this unlinked/truncated file
1821 ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
1827 * We have done the truncate/delete so we can go ahead and remove the orphan
1828 * item for this particular inode.
1830 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
1832 struct btrfs_root *root = BTRFS_I(inode)->root;
1835 spin_lock(&root->list_lock);
1837 if (list_empty(&BTRFS_I(inode)->i_orphan)) {
1838 spin_unlock(&root->list_lock);
1842 list_del_init(&BTRFS_I(inode)->i_orphan);
1844 spin_unlock(&root->list_lock);
1848 spin_unlock(&root->list_lock);
1850 ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
1856 * this cleans up any orphans that may be left on the list from the last use of this root.
1859 void btrfs_orphan_cleanup(struct btrfs_root *root)
1861 struct btrfs_path *path;
1862 struct extent_buffer *leaf;
1863 struct btrfs_item *item;
1864 struct btrfs_key key, found_key;
1865 struct btrfs_trans_handle *trans;
1866 struct inode *inode;
1867 int ret = 0, nr_unlink = 0, nr_truncate = 0;
1869 path = btrfs_alloc_path();
1874 key.objectid = BTRFS_ORPHAN_OBJECTID;
1875 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1876 key.offset = (u64)-1;
1880 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1882 printk(KERN_ERR "Error searching slot for orphan: %d"
1888 * if ret == 0 means we found what we were searching for, which
1889 * is weird, but possible, so only screw with path if we didn't
1890 * find the key and see if we have stuff that matches
1893 if (path->slots[0] == 0)
1898 /* pull out the item */
1899 leaf = path->nodes[0];
1900 item = btrfs_item_nr(leaf, path->slots[0]);
1901 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1903 /* make sure the item matches what we want */
1904 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
1906 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
1909 /* release the path since we're done with it */
1910 btrfs_release_path(root, path);
1913 * this is where we are basically btrfs_lookup, without the
1914 * crossing root thing. we store the inode number in the
1915 * offset of the orphan item.
1917 inode = btrfs_iget_locked(root->fs_info->sb,
1918 found_key.offset, root);
1922 if (inode->i_state & I_NEW) {
1923 BTRFS_I(inode)->root = root;
1925 /* have to set the location manually */
1926 BTRFS_I(inode)->location.objectid = inode->i_ino;
1927 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
1928 BTRFS_I(inode)->location.offset = 0;
1930 btrfs_read_locked_inode(inode);
1931 unlock_new_inode(inode);
1935 * add this inode to the orphan list so btrfs_orphan_del does
1936 * the proper thing when we hit it
1938 spin_lock(&root->list_lock);
1939 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1940 spin_unlock(&root->list_lock);
1943 * if this is a bad inode, it means we actually succeeded in
1944 * removing the inode, but not the orphan record, which means
1945 * we need to manually delete the orphan since iput will just
1946 * do a destroy_inode
1948 if (is_bad_inode(inode)) {
1949 trans = btrfs_start_transaction(root, 1);
1950 btrfs_orphan_del(trans, inode);
1951 btrfs_end_transaction(trans, root);
1956 /* if we have links, this was a truncate, lets do that */
1957 if (inode->i_nlink) {
1959 btrfs_truncate(inode);
1964 /* this will do delete_inode and everything for us */
1969 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
1971 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
1973 btrfs_free_path(path);
1977 * read an inode from the btree into the in-memory inode
1979 void btrfs_read_locked_inode(struct inode *inode)
1981 struct btrfs_path *path;
1982 struct extent_buffer *leaf;
1983 struct btrfs_inode_item *inode_item;
1984 struct btrfs_timespec *tspec;
1985 struct btrfs_root *root = BTRFS_I(inode)->root;
1986 struct btrfs_key location;
1987 u64 alloc_group_block;
1991 path = btrfs_alloc_path();
1993 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
1995 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
1999 leaf = path->nodes[0];
2000 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2001 struct btrfs_inode_item);
2003 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2004 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2005 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2006 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2007 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2009 tspec = btrfs_inode_atime(inode_item);
2010 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2011 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2013 tspec = btrfs_inode_mtime(inode_item);
2014 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2015 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2017 tspec = btrfs_inode_ctime(inode_item);
2018 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2019 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2021 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2022 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2023 BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2024 inode->i_generation = BTRFS_I(inode)->generation;
2026 rdev = btrfs_inode_rdev(leaf, inode_item);
2028 BTRFS_I(inode)->index_cnt = (u64)-1;
2029 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2031 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2033 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2034 alloc_group_block, 0);
2035 btrfs_free_path(path);
2038 switch (inode->i_mode & S_IFMT) {
2040 inode->i_mapping->a_ops = &btrfs_aops;
2041 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2042 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2043 inode->i_fop = &btrfs_file_operations;
2044 inode->i_op = &btrfs_file_inode_operations;
2047 inode->i_fop = &btrfs_dir_file_operations;
2048 if (root == root->fs_info->tree_root)
2049 inode->i_op = &btrfs_dir_ro_inode_operations;
2051 inode->i_op = &btrfs_dir_inode_operations;
2054 inode->i_op = &btrfs_symlink_inode_operations;
2055 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2056 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2059 inode->i_op = &btrfs_special_inode_operations;
2060 init_special_inode(inode, inode->i_mode, rdev);
2066 btrfs_free_path(path);
2067 make_bad_inode(inode);
2071 * given a leaf and an inode, copy the inode fields into the leaf
2073 static void fill_inode_item(struct btrfs_trans_handle *trans,
2074 struct extent_buffer *leaf,
2075 struct btrfs_inode_item *item,
2076 struct inode *inode)
2078 btrfs_set_inode_uid(leaf, item, inode->i_uid);
2079 btrfs_set_inode_gid(leaf, item, inode->i_gid);
2080 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2081 btrfs_set_inode_mode(leaf, item, inode->i_mode);
2082 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2084 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2085 inode->i_atime.tv_sec);
2086 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2087 inode->i_atime.tv_nsec);
2089 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2090 inode->i_mtime.tv_sec);
2091 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2092 inode->i_mtime.tv_nsec);
2094 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2095 inode->i_ctime.tv_sec);
2096 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2097 inode->i_ctime.tv_nsec);
2099 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2100 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2101 btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2102 btrfs_set_inode_transid(leaf, item, trans->transid);
2103 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2104 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2105 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2109 * copy everything in the in-memory inode into the btree.
2111 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2112 struct btrfs_root *root, struct inode *inode)
2114 struct btrfs_inode_item *inode_item;
2115 struct btrfs_path *path;
2116 struct extent_buffer *leaf;
2119 path = btrfs_alloc_path();
2121 ret = btrfs_lookup_inode(trans, root, path,
2122 &BTRFS_I(inode)->location, 1);
2129 btrfs_unlock_up_safe(path, 1);
2130 leaf = path->nodes[0];
2131 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2132 struct btrfs_inode_item);
2134 fill_inode_item(trans, leaf, inode_item, inode);
2135 btrfs_mark_buffer_dirty(leaf);
2136 btrfs_set_inode_last_trans(trans, inode);
2139 btrfs_free_path(path);
2145 * unlink helper that gets used here in inode.c and in the tree logging
2146 * recovery code. It removes a link in a directory with a given name, and
2147 * also drops the back refs in the inode to the directory
2149 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2150 struct btrfs_root *root,
2151 struct inode *dir, struct inode *inode,
2152 const char *name, int name_len)
2154 struct btrfs_path *path;
2156 struct extent_buffer *leaf;
2157 struct btrfs_dir_item *di;
2158 struct btrfs_key key;
2161 path = btrfs_alloc_path();
2167 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2168 name, name_len, -1);
2177 leaf = path->nodes[0];
2178 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2179 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2182 btrfs_release_path(root, path);
2184 ret = btrfs_del_inode_ref(trans, root, name, name_len,
2186 dir->i_ino, &index);
2188 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2189 "inode %lu parent %lu\n", name_len, name,
2190 inode->i_ino, dir->i_ino);
2194 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2195 index, name, name_len, -1);
2204 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2205 btrfs_release_path(root, path);
2207 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2209 BUG_ON(ret != 0 && ret != -ENOENT);
2211 BTRFS_I(dir)->log_dirty_trans = trans->transid;
2213 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2217 btrfs_free_path(path);
2221 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2222 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2223 btrfs_update_inode(trans, root, dir);
2224 btrfs_drop_nlink(inode);
2225 ret = btrfs_update_inode(trans, root, inode);
2226 dir->i_sb->s_dirt = 1;
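/*
 * Summary of the steps above (added for clarity): unlinking a name removes
 * four pieces of metadata: the BTRFS_DIR_ITEM_KEY entry (hashed by name),
 * the BTRFS_DIR_INDEX_KEY entry (ordered by the readdir index), the
 * BTRFS_INODE_REF_KEY back reference in the child inode, and any matching
 * entries in the tree log.  The directory's i_size then shrinks by
 * name_len * 2, one count for the DIR_ITEM and one for the DIR_INDEX item.
 */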
2231 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2233 struct btrfs_root *root;
2234 struct btrfs_trans_handle *trans;
2235 struct inode *inode = dentry->d_inode;
2237 unsigned long nr = 0;
2239 root = BTRFS_I(dir)->root;
2241 trans = btrfs_start_transaction(root, 1);
2243 btrfs_set_trans_block_group(trans, dir);
2244 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2245 dentry->d_name.name, dentry->d_name.len);
2247 if (inode->i_nlink == 0)
2248 ret = btrfs_orphan_add(trans, inode);
2250 nr = trans->blocks_used;
2252 btrfs_end_transaction_throttle(trans, root);
2253 btrfs_btree_balance_dirty(root, nr);
2257 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2259 struct inode *inode = dentry->d_inode;
2262 struct btrfs_root *root = BTRFS_I(dir)->root;
2263 struct btrfs_trans_handle *trans;
2264 unsigned long nr = 0;
2267 * the FIRST_FREE_OBJECTID check makes sure we don't try to rmdir
2268 * the root of a subvolume or snapshot
2270 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2271 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
2275 trans = btrfs_start_transaction(root, 1);
2276 btrfs_set_trans_block_group(trans, dir);
2278 err = btrfs_orphan_add(trans, inode);
2282 /* now the directory is empty */
2283 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2284 dentry->d_name.name, dentry->d_name.len);
2286 btrfs_i_size_write(inode, 0);
2289 nr = trans->blocks_used;
2290 ret = btrfs_end_transaction_throttle(trans, root);
2291 btrfs_btree_balance_dirty(root, nr);
2300 * when truncating bytes in a file, it is possible to avoid reading
2301 * the leaves that contain only checksum items. This can be the
2302 * majority of the IO required to delete a large file, but it must
2303 * be done carefully.
2305 * The keys in the level just above the leaves are checked to make sure
2306 * the lowest key in a given leaf is a csum key, and starts at an offset
2307 * after the new size.
2309 * Then the key for the next leaf is checked to make sure it also has
2310 * a checksum item for the same file. If it does, we know our target leaf
2311 * contains only checksum items, and it can be safely freed without reading it.
2314 * This is just an optimization targeted at large files. It may do
2315 * nothing. It will return 0 unless things went badly.
2317 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2318 struct btrfs_root *root,
2319 struct btrfs_path *path,
2320 struct inode *inode, u64 new_size)
2322 struct btrfs_key key;
2325 struct btrfs_key found_key;
2326 struct btrfs_key other_key;
2327 struct btrfs_leaf_ref *ref;
2331 path->lowest_level = 1;
2332 key.objectid = inode->i_ino;
2333 key.type = BTRFS_CSUM_ITEM_KEY;
2334 key.offset = new_size;
2336 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2340 if (path->nodes[1] == NULL) {
2345 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2346 nritems = btrfs_header_nritems(path->nodes[1]);
2351 if (path->slots[1] >= nritems)
2354 /* did we find a key greater than anything we want to delete? */
2355 if (found_key.objectid > inode->i_ino ||
2356 (found_key.objectid == inode->i_ino && found_key.type > key.type))
2359 /* we check the next key in the node to make sure the leaf contains
2360 * only checksum items. This comparison doesn't work if our
2361 * leaf is the last one in the node
2363 if (path->slots[1] + 1 >= nritems) {
2365 /* search forward from the last key in the node, this
2366 * will bring us into the next node in the tree
2368 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2370 /* unlikely, but we inc below, so check to be safe */
2371 if (found_key.offset == (u64)-1)
2374 /* search_forward needs a path with locks held, do the
2375 * search again for the original key. It is possible
2376 * this will race with a balance and return a path that
2377 * we could modify, but this drop is just an optimization
2378 * and is allowed to miss some leaves.
2380 btrfs_release_path(root, path);
2383 /* setup a max key for search_forward */
2384 other_key.offset = (u64)-1;
2385 other_key.type = key.type;
2386 other_key.objectid = key.objectid;
2388 path->keep_locks = 1;
2389 ret = btrfs_search_forward(root, &found_key, &other_key,
2391 path->keep_locks = 0;
2392 if (ret || found_key.objectid != key.objectid ||
2393 found_key.type != key.type) {
2398 key.offset = found_key.offset;
2399 btrfs_release_path(root, path);
2404 /* we know there's one more slot after us in the tree,
2405 * read that key so we can verify it is also a checksum item
2407 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2409 if (found_key.objectid < inode->i_ino)
2412 if (found_key.type != key.type || found_key.offset < new_size)
2416 * if the key for the next leaf isn't a csum key from this objectid,
2417 * we can't be sure there aren't good items inside this leaf.
2420 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2423 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2424 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2426 * it is safe to delete this leaf, it contains only
2427 * csum items from this inode at an offset >= new_size
2429 ret = btrfs_del_leaf(trans, root, path, leaf_start);
2432 if (root->ref_cows && leaf_gen < trans->transid) {
2433 ref = btrfs_alloc_leaf_ref(root, 0);
2435 ref->root_gen = root->root_key.offset;
2436 ref->bytenr = leaf_start;
2438 ref->generation = leaf_gen;
2441 btrfs_sort_leaf_ref(ref);
2443 ret = btrfs_add_leaf_ref(root, ref, 0);
2445 btrfs_free_leaf_ref(root, ref);
2451 btrfs_release_path(root, path);
2453 if (other_key.objectid == inode->i_ino &&
2454 other_key.type == key.type && other_key.offset > key.offset) {
2455 key.offset = other_key.offset;
2461 /* fixup any changes we've made to the path */
2462 path->lowest_level = 0;
2463 path->keep_locks = 0;
2464 btrfs_release_path(root, path);
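/*
 * Editorial illustration (hypothetical keys): if the level-1 node holds
 *
 *	(ino CSUM 1M)  (ino CSUM 5M)  ...
 *
 * and new_size is 1M, then every key in the leaf under the first pointer
 * sorts between those two node keys, i.e. it is a csum key for this inode
 * at an offset >= new_size, so that leaf can be freed without being read.
 * If the next node key belonged to another objectid or key type, the leaf
 * might hold other items and is left alone.
 */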
2471 * this can truncate away extent items, csum items and directory items.
2472 * It starts at a high offset and removes keys until it can't find
2473 * any higher than new_size
2475 * csum items that cross the new i_size are truncated to the new size
2478 * min_type is the minimum key type to truncate down to. If set to 0, this
2479 * will kill all the items on this inode, including the INODE_ITEM_KEY.
2481 noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2482 struct btrfs_root *root,
2483 struct inode *inode,
2484 u64 new_size, u32 min_type)
2487 struct btrfs_path *path;
2488 struct btrfs_key key;
2489 struct btrfs_key found_key;
2490 u32 found_type = (u8)-1;
2491 struct extent_buffer *leaf;
2492 struct btrfs_file_extent_item *fi;
2493 u64 extent_start = 0;
2494 u64 extent_num_bytes = 0;
2500 int pending_del_nr = 0;
2501 int pending_del_slot = 0;
2502 int extent_type = -1;
2504 u64 mask = root->sectorsize - 1;
2507 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2508 path = btrfs_alloc_path();
2512 /* FIXME, add redo link to tree so we don't leak on crash */
2513 key.objectid = inode->i_ino;
2514 key.offset = (u64)-1;
2518 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2523 /* there are no items in the tree for us to truncate, we're done */
2526 if (path->slots[0] == 0) {
2535 leaf = path->nodes[0];
2536 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2537 found_type = btrfs_key_type(&found_key);
2540 if (found_key.objectid != inode->i_ino)
2543 if (found_type < min_type)
2546 item_end = found_key.offset;
2547 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2548 fi = btrfs_item_ptr(leaf, path->slots[0],
2549 struct btrfs_file_extent_item);
2550 extent_type = btrfs_file_extent_type(leaf, fi);
2551 encoding = btrfs_file_extent_compression(leaf, fi);
2552 encoding |= btrfs_file_extent_encryption(leaf, fi);
2553 encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2555 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2557 item_end += btrfs_file_extent_num_bytes(leaf, fi);
2558 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2559 item_end += btrfs_file_extent_inline_len(leaf,
2564 if (item_end < new_size) {
2565 if (found_type == BTRFS_DIR_ITEM_KEY)
2566 found_type = BTRFS_INODE_ITEM_KEY;
2567 else if (found_type == BTRFS_EXTENT_ITEM_KEY)
2568 found_type = BTRFS_EXTENT_DATA_KEY;
2569 else if (found_type == BTRFS_EXTENT_DATA_KEY)
2570 found_type = BTRFS_XATTR_ITEM_KEY;
2571 else if (found_type == BTRFS_XATTR_ITEM_KEY)
2572 found_type = BTRFS_INODE_REF_KEY;
2573 else if (found_type)
2577 btrfs_set_key_type(&key, found_type);
2580 if (found_key.offset >= new_size)
2586 /* FIXME, shrink the extent if the ref count is only 1 */
2587 if (found_type != BTRFS_EXTENT_DATA_KEY)
2590 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2592 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2593 if (!del_item && !encoding) {
2594 u64 orig_num_bytes =
2595 btrfs_file_extent_num_bytes(leaf, fi);
2596 extent_num_bytes = new_size -
2597 found_key.offset + root->sectorsize - 1;
2598 extent_num_bytes = extent_num_bytes &
2599 ~((u64)root->sectorsize - 1);
2600 btrfs_set_file_extent_num_bytes(leaf, fi,
2602 num_dec = (orig_num_bytes -
2604 if (root->ref_cows && extent_start != 0)
2605 inode_sub_bytes(inode, num_dec);
2606 btrfs_mark_buffer_dirty(leaf);
2609 btrfs_file_extent_disk_num_bytes(leaf,
2611 /* FIXME blocksize != 4096 */
2612 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2613 if (extent_start != 0) {
2616 inode_sub_bytes(inode, num_dec);
2618 root_gen = btrfs_header_generation(leaf);
2619 root_owner = btrfs_header_owner(leaf);
2621 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2623 * we can't truncate inline items that have had special encodings
2627 btrfs_file_extent_compression(leaf, fi) == 0 &&
2628 btrfs_file_extent_encryption(leaf, fi) == 0 &&
2629 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2630 u32 size = new_size - found_key.offset;
2632 if (root->ref_cows) {
2633 inode_sub_bytes(inode, item_end + 1 -
2637 btrfs_file_extent_calc_inline_size(size);
2638 ret = btrfs_truncate_item(trans, root, path,
2641 } else if (root->ref_cows) {
2642 inode_sub_bytes(inode, item_end + 1 -
2648 if (!pending_del_nr) {
2649 /* no pending yet, add ourselves */
2650 pending_del_slot = path->slots[0];
2652 } else if (pending_del_nr &&
2653 path->slots[0] + 1 == pending_del_slot) {
2654 /* hop on the pending chunk */
2656 pending_del_slot = path->slots[0];
2664 ret = btrfs_free_extent(trans, root, extent_start,
2666 leaf->start, root_owner,
2667 root_gen, inode->i_ino, 0);
2671 if (path->slots[0] == 0) {
2674 btrfs_release_path(root, path);
2675 if (found_type == BTRFS_INODE_ITEM_KEY)
2681 if (pending_del_nr &&
2682 path->slots[0] + 1 != pending_del_slot) {
2683 struct btrfs_key debug;
2685 btrfs_item_key_to_cpu(path->nodes[0], &debug,
2687 ret = btrfs_del_items(trans, root, path,
2692 btrfs_release_path(root, path);
2693 if (found_type == BTRFS_INODE_ITEM_KEY)
2700 if (pending_del_nr) {
2701 ret = btrfs_del_items(trans, root, path, pending_del_slot,
2704 btrfs_free_path(path);
2705 inode->i_sb->s_dirt = 1;
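/*
 * Note on the loop above (added for clarity): items are visited from the
 * highest key downwards, and deletions are batched through
 * pending_del_slot/pending_del_nr so that a single btrfs_del_items() call
 * removes a contiguous run of slots instead of paying for a separate btree
 * deletion per item.
 */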
2710 * taken from block_truncate_page, but does cow as it zeros out
2711 * any bytes left in the last page in the file.
2713 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
2715 struct inode *inode = mapping->host;
2716 struct btrfs_root *root = BTRFS_I(inode)->root;
2717 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2718 struct btrfs_ordered_extent *ordered;
2720 u32 blocksize = root->sectorsize;
2721 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2722 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2728 if ((offset & (blocksize - 1)) == 0)
2733 page = grab_cache_page(mapping, index);
2737 page_start = page_offset(page);
2738 page_end = page_start + PAGE_CACHE_SIZE - 1;
2740 if (!PageUptodate(page)) {
2741 ret = btrfs_readpage(NULL, page);
2743 if (page->mapping != mapping) {
2745 page_cache_release(page);
2748 if (!PageUptodate(page)) {
2753 wait_on_page_writeback(page);
2755 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2756 set_page_extent_mapped(page);
2758 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2760 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2762 page_cache_release(page);
2763 btrfs_start_ordered_extent(inode, ordered, 1);
2764 btrfs_put_ordered_extent(ordered);
2768 btrfs_set_extent_delalloc(inode, page_start, page_end);
2770 if (offset != PAGE_CACHE_SIZE) {
2772 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2773 flush_dcache_page(page);
2776 ClearPageChecked(page);
2777 set_page_dirty(page);
2778 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2782 page_cache_release(page);
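/*
 * Editorial example (assuming a 4096 byte sectorsize/page size): truncating
 * a file to 10000 bytes leaves offset 10000 in the middle of the page that
 * covers [8192, 12288).  btrfs_truncate_page() zeroes bytes 10000-12287 of
 * that page through the page cache so the stale tail never becomes visible,
 * and marks the range delalloc so the zeros are written out via the normal
 * COW path.
 */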
2787 int btrfs_cont_expand(struct inode *inode, loff_t size)
2789 struct btrfs_trans_handle *trans;
2790 struct btrfs_root *root = BTRFS_I(inode)->root;
2791 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2792 struct extent_map *em;
2793 u64 mask = root->sectorsize - 1;
2794 u64 hole_start = (inode->i_size + mask) & ~mask;
2795 u64 block_end = (size + mask) & ~mask;
2801 if (size <= hole_start)
2804 err = btrfs_check_metadata_free_space(root);
2808 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2811 struct btrfs_ordered_extent *ordered;
2812 btrfs_wait_ordered_range(inode, hole_start,
2813 block_end - hole_start);
2814 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2815 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
2818 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2819 btrfs_put_ordered_extent(ordered);
2822 trans = btrfs_start_transaction(root, 1);
2823 btrfs_set_trans_block_group(trans, inode);
2825 cur_offset = hole_start;
2827 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2828 block_end - cur_offset, 0);
2829 BUG_ON(IS_ERR(em) || !em);
2830 last_byte = min(extent_map_end(em), block_end);
2831 last_byte = (last_byte + mask) & ~mask;
2832 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2834 hole_size = last_byte - cur_offset;
2835 err = btrfs_drop_extents(trans, root, inode,
2837 cur_offset + hole_size,
2838 cur_offset, &hint_byte);
2841 err = btrfs_insert_file_extent(trans, root,
2842 inode->i_ino, cur_offset, 0,
2843 0, hole_size, 0, hole_size,
2845 btrfs_drop_extent_cache(inode, hole_start,
2848 free_extent_map(em);
2849 cur_offset = last_byte;
2850 if (err || cur_offset >= block_end)
2854 btrfs_end_transaction(trans, root);
2855 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
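/*
 * Editorial example: expanding i_size from 8192 to 20480 with a 4096 byte
 * sectorsize makes hole_start 8192 and block_end 20480.  For each vacant
 * extent map in that range the loop above drops any cached extents and
 * inserts a file extent item with disk bytenr 0, so reads of the hole
 * return zeros without allocating data blocks.
 */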
2859 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
2861 struct inode *inode = dentry->d_inode;
2864 err = inode_change_ok(inode, attr);
2868 if (S_ISREG(inode->i_mode) &&
2869 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
2870 err = btrfs_cont_expand(inode, attr->ia_size);
2875 err = inode_setattr(inode, attr);
2877 if (!err && ((attr->ia_valid & ATTR_MODE)))
2878 err = btrfs_acl_chmod(inode);
2882 void btrfs_delete_inode(struct inode *inode)
2884 struct btrfs_trans_handle *trans;
2885 struct btrfs_root *root = BTRFS_I(inode)->root;
2889 truncate_inode_pages(&inode->i_data, 0);
2890 if (is_bad_inode(inode)) {
2891 btrfs_orphan_del(NULL, inode);
2894 btrfs_wait_ordered_range(inode, 0, (u64)-1);
2896 btrfs_i_size_write(inode, 0);
2897 trans = btrfs_join_transaction(root, 1);
2899 btrfs_set_trans_block_group(trans, inode);
2900 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
2902 btrfs_orphan_del(NULL, inode);
2903 goto no_delete_lock;
2906 btrfs_orphan_del(trans, inode);
2908 nr = trans->blocks_used;
2911 btrfs_end_transaction(trans, root);
2912 btrfs_btree_balance_dirty(root, nr);
2916 nr = trans->blocks_used;
2917 btrfs_end_transaction(trans, root);
2918 btrfs_btree_balance_dirty(root, nr);
2924 * this returns the key found in the dir entry in the location pointer.
2925 * If no dir entries were found, location->objectid is 0.
2927 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
2928 struct btrfs_key *location)
2930 const char *name = dentry->d_name.name;
2931 int namelen = dentry->d_name.len;
2932 struct btrfs_dir_item *di;
2933 struct btrfs_path *path;
2934 struct btrfs_root *root = BTRFS_I(dir)->root;
2937 path = btrfs_alloc_path();
2940 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
2945 if (!di || IS_ERR(di))
2948 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
2950 btrfs_free_path(path);
2953 location->objectid = 0;
2958 * when we hit a tree root in a directory, the btrfs part of the inode
2959 * needs to be changed to reflect the root directory of the tree root. This
2960 * is kind of like crossing a mount point.
2962 static int fixup_tree_root_location(struct btrfs_root *root,
2963 struct btrfs_key *location,
2964 struct btrfs_root **sub_root,
2965 struct dentry *dentry)
2967 struct btrfs_root_item *ri;
2969 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
2971 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
2974 *sub_root = btrfs_read_fs_root(root->fs_info, location,
2975 dentry->d_name.name,
2976 dentry->d_name.len);
2977 if (IS_ERR(*sub_root))
2978 return PTR_ERR(*sub_root);
2980 ri = &(*sub_root)->root_item;
2981 location->objectid = btrfs_root_dirid(ri);
2982 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
2983 location->offset = 0;
2988 static noinline void init_btrfs_i(struct inode *inode)
2990 struct btrfs_inode *bi = BTRFS_I(inode);
2993 bi->i_default_acl = NULL;
2998 bi->logged_trans = 0;
2999 bi->delalloc_bytes = 0;
3000 bi->reserved_bytes = 0;
3001 bi->disk_i_size = 0;
3003 bi->index_cnt = (u64)-1;
3004 bi->log_dirty_trans = 0;
3005 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3006 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3007 inode->i_mapping, GFP_NOFS);
3008 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3009 inode->i_mapping, GFP_NOFS);
3010 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3011 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3012 mutex_init(&BTRFS_I(inode)->extent_mutex);
3013 mutex_init(&BTRFS_I(inode)->log_mutex);
3016 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3018 struct btrfs_iget_args *args = p;
3019 inode->i_ino = args->ino;
3020 init_btrfs_i(inode);
3021 BTRFS_I(inode)->root = args->root;
3022 btrfs_set_inode_space_info(args->root, inode);
3026 static int btrfs_find_actor(struct inode *inode, void *opaque)
3028 struct btrfs_iget_args *args = opaque;
3029 return args->ino == inode->i_ino &&
3030 args->root == BTRFS_I(inode)->root;
3033 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
3034 struct btrfs_root *root, int wait)
3036 struct inode *inode;
3037 struct btrfs_iget_args args;
3038 args.ino = objectid;
3042 inode = ilookup5(s, objectid, btrfs_find_actor,
3045 inode = ilookup5_nowait(s, objectid, btrfs_find_actor,
3051 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
3052 struct btrfs_root *root)
3054 struct inode *inode;
3055 struct btrfs_iget_args args;
3056 args.ino = objectid;
3059 inode = iget5_locked(s, objectid, btrfs_find_actor,
3060 btrfs_init_locked_inode,
3065 /* Get an inode object given its location and corresponding root.
3066 * Returns in *is_new if the inode was read from disk
3068 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3069 struct btrfs_root *root, int *is_new)
3071 struct inode *inode;
3073 inode = btrfs_iget_locked(s, location->objectid, root);
3075 return ERR_PTR(-EACCES);
3077 if (inode->i_state & I_NEW) {
3078 BTRFS_I(inode)->root = root;
3079 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3080 btrfs_read_locked_inode(inode);
3081 unlock_new_inode(inode);
3092 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3094 struct inode *inode;
3095 struct btrfs_inode *bi = BTRFS_I(dir);
3096 struct btrfs_root *root = bi->root;
3097 struct btrfs_root *sub_root = root;
3098 struct btrfs_key location;
3101 if (dentry->d_name.len > BTRFS_NAME_LEN)
3102 return ERR_PTR(-ENAMETOOLONG);
3104 ret = btrfs_inode_by_name(dir, dentry, &location);
3107 return ERR_PTR(ret);
3110 if (location.objectid) {
3111 ret = fixup_tree_root_location(root, &location, &sub_root,
3114 return ERR_PTR(ret);
3116 return ERR_PTR(-ENOENT);
3117 inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
3119 return ERR_CAST(inode);
3124 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3125 struct nameidata *nd)
3127 struct inode *inode;
3129 if (dentry->d_name.len > BTRFS_NAME_LEN)
3130 return ERR_PTR(-ENAMETOOLONG);
3132 inode = btrfs_lookup_dentry(dir, dentry);
3134 return ERR_CAST(inode);
3136 return d_splice_alias(inode, dentry);
3139 static unsigned char btrfs_filetype_table[] = {
3140 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
3143 static int btrfs_real_readdir(struct file *filp, void *dirent,
3146 struct inode *inode = filp->f_dentry->d_inode;
3147 struct btrfs_root *root = BTRFS_I(inode)->root;
3148 struct btrfs_item *item;
3149 struct btrfs_dir_item *di;
3150 struct btrfs_key key;
3151 struct btrfs_key found_key;
3152 struct btrfs_path *path;
3155 struct extent_buffer *leaf;
3158 unsigned char d_type;
3163 int key_type = BTRFS_DIR_INDEX_KEY;
3168 /* FIXME, use a real flag for deciding about the key type */
3169 if (root->fs_info->tree_root == root)
3170 key_type = BTRFS_DIR_ITEM_KEY;
3172 /* special case for "." */
3173 if (filp->f_pos == 0) {
3174 over = filldir(dirent, ".", 1,
3181 /* special case for .., just use the back ref */
3182 if (filp->f_pos == 1) {
3183 u64 pino = parent_ino(filp->f_path.dentry);
3184 over = filldir(dirent, "..", 2,
3190 path = btrfs_alloc_path();
3193 btrfs_set_key_type(&key, key_type);
3194 key.offset = filp->f_pos;
3195 key.objectid = inode->i_ino;
3197 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3203 leaf = path->nodes[0];
3204 nritems = btrfs_header_nritems(leaf);
3205 slot = path->slots[0];
3206 if (advance || slot >= nritems) {
3207 if (slot >= nritems - 1) {
3208 ret = btrfs_next_leaf(root, path);
3211 leaf = path->nodes[0];
3212 nritems = btrfs_header_nritems(leaf);
3213 slot = path->slots[0];
3221 item = btrfs_item_nr(leaf, slot);
3222 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3224 if (found_key.objectid != key.objectid)
3226 if (btrfs_key_type(&found_key) != key_type)
3228 if (found_key.offset < filp->f_pos)
3231 filp->f_pos = found_key.offset;
3233 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3235 di_total = btrfs_item_size(leaf, item);
3237 while (di_cur < di_total) {
3238 struct btrfs_key location;
3240 name_len = btrfs_dir_name_len(leaf, di);
3241 if (name_len <= sizeof(tmp_name)) {
3242 name_ptr = tmp_name;
3244 name_ptr = kmalloc(name_len, GFP_NOFS);
3250 read_extent_buffer(leaf, name_ptr,
3251 (unsigned long)(di + 1), name_len);
3253 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3254 btrfs_dir_item_key_to_cpu(leaf, di, &location);
3256 /* is this a reference to our own snapshot? If so, skip it */
3259 if (location.type == BTRFS_ROOT_ITEM_KEY &&
3260 location.objectid == root->root_key.objectid) {
3264 over = filldir(dirent, name_ptr, name_len,
3265 found_key.offset, location.objectid,
3269 if (name_ptr != tmp_name)
3274 di_len = btrfs_dir_name_len(leaf, di) +
3275 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3277 di = (struct btrfs_dir_item *)((char *)di + di_len);
3281 /* Reached end of directory/root. Bump pos past the last item. */
3282 if (key_type == BTRFS_DIR_INDEX_KEY)
3283 filp->f_pos = INT_LIMIT(off_t);
3289 btrfs_free_path(path);
3293 int btrfs_write_inode(struct inode *inode, int wait)
3295 struct btrfs_root *root = BTRFS_I(inode)->root;
3296 struct btrfs_trans_handle *trans;
3299 if (root->fs_info->btree_inode == inode)
3303 trans = btrfs_join_transaction(root, 1);
3304 btrfs_set_trans_block_group(trans, inode);
3305 ret = btrfs_commit_transaction(trans, root);
3311 * This is somewhat expensive, updating the tree every time the
3312 * inode changes. But, it is most likely to find the inode in cache.
3313 * FIXME, needs more benchmarking...there are no reasons other than performance
3314 * to keep or drop this code.
3316 void btrfs_dirty_inode(struct inode *inode)
3318 struct btrfs_root *root = BTRFS_I(inode)->root;
3319 struct btrfs_trans_handle *trans;
3321 trans = btrfs_join_transaction(root, 1);
3322 btrfs_set_trans_block_group(trans, inode);
3323 btrfs_update_inode(trans, root, inode);
3324 btrfs_end_transaction(trans, root);
3328 * find the highest existing sequence number in a directory
3329 * and then set the in-memory index_cnt variable to reflect
3330 * free sequence numbers
3332 static int btrfs_set_inode_index_count(struct inode *inode)
3334 struct btrfs_root *root = BTRFS_I(inode)->root;
3335 struct btrfs_key key, found_key;
3336 struct btrfs_path *path;
3337 struct extent_buffer *leaf;
3340 key.objectid = inode->i_ino;
3341 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
3342 key.offset = (u64)-1;
3344 path = btrfs_alloc_path();
3348 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3351 /* FIXME: we should be able to handle this */
3357 * MAGIC NUMBER EXPLANATION:
3358 * since we search a directory based on f_pos we have to start at 2
3359 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
3360 * else has to start at 2
3362 if (path->slots[0] == 0) {
3363 BTRFS_I(inode)->index_cnt = 2;
3369 leaf = path->nodes[0];
3370 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3372 if (found_key.objectid != inode->i_ino ||
3373 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
3374 BTRFS_I(inode)->index_cnt = 2;
3378 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
3380 btrfs_free_path(path);
3385 * helper to find a free sequence number in a given directory. The current
3386 * code is very simple; later versions will do smarter things in the btree
3388 int btrfs_set_inode_index(struct inode *dir, u64 *index)
3392 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
3393 ret = btrfs_set_inode_index_count(dir);
3398 *index = BTRFS_I(dir)->index_cnt;
3399 BTRFS_I(dir)->index_cnt++;
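/*
 * Editorial example: for a directory whose highest existing
 * BTRFS_DIR_INDEX_KEY offset is 57, btrfs_set_inode_index_count() sets
 * index_cnt to 58, and successive btrfs_set_inode_index() calls hand out
 * 58, 59, 60, ... as the readdir positions of newly added names.  A brand
 * new directory starts at 2 because f_pos 0 and 1 are reserved for
 * "." and "..".
 */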
3404 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3405 struct btrfs_root *root,
3407 const char *name, int name_len,
3408 u64 ref_objectid, u64 objectid,
3409 u64 alloc_hint, int mode, u64 *index)
3411 struct inode *inode;
3412 struct btrfs_inode_item *inode_item;
3413 struct btrfs_key *location;
3414 struct btrfs_path *path;
3415 struct btrfs_inode_ref *ref;
3416 struct btrfs_key key[2];
3422 path = btrfs_alloc_path();
3425 inode = new_inode(root->fs_info->sb);
3427 return ERR_PTR(-ENOMEM);
3430 ret = btrfs_set_inode_index(dir, index);
3432 return ERR_PTR(ret);
3435 * index_cnt is ignored for everything but a dir,
3436 * btrfs_set_inode_index_count has an explanation for the magic
3439 init_btrfs_i(inode);
3440 BTRFS_I(inode)->index_cnt = 2;
3441 BTRFS_I(inode)->root = root;
3442 BTRFS_I(inode)->generation = trans->transid;
3443 btrfs_set_inode_space_info(root, inode);
3449 BTRFS_I(inode)->block_group =
3450 btrfs_find_block_group(root, 0, alloc_hint, owner);
3451 if ((mode & S_IFREG)) {
3452 if (btrfs_test_opt(root, NODATASUM))
3453 btrfs_set_flag(inode, NODATASUM);
3454 if (btrfs_test_opt(root, NODATACOW))
3455 btrfs_set_flag(inode, NODATACOW);
3458 key[0].objectid = objectid;
3459 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
3462 key[1].objectid = objectid;
3463 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
3464 key[1].offset = ref_objectid;
3466 sizes[0] = sizeof(struct btrfs_inode_item);
3467 sizes[1] = name_len + sizeof(*ref);
3469 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
3473 if (objectid > root->highest_inode)
3474 root->highest_inode = objectid;
3476 inode->i_uid = current_fsuid();
3478 if (dir && (dir->i_mode & S_ISGID)) {
3479 inode->i_gid = dir->i_gid;
3483 inode->i_gid = current_fsgid();
3485 inode->i_mode = mode;
3486 inode->i_ino = objectid;
3487 inode_set_bytes(inode, 0);
3488 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3489 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3490 struct btrfs_inode_item);
3491 fill_inode_item(trans, path->nodes[0], inode_item, inode);
3493 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3494 struct btrfs_inode_ref);
3495 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
3496 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
3497 ptr = (unsigned long)(ref + 1);
3498 write_extent_buffer(path->nodes[0], name, ptr, name_len);
3500 btrfs_mark_buffer_dirty(path->nodes[0]);
3501 btrfs_free_path(path);
3503 location = &BTRFS_I(inode)->location;
3504 location->objectid = objectid;
3505 location->offset = 0;
3506 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3508 insert_inode_hash(inode);
3512 BTRFS_I(dir)->index_cnt--;
3513 btrfs_free_path(path);
3514 return ERR_PTR(ret);
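/*
 * Note on the item layout above (added for clarity): btrfs_new_inode()
 * inserts two adjacent items with one btrfs_insert_empty_items() call: the
 * inode item keyed (objectid, BTRFS_INODE_ITEM_KEY, 0) and the back
 * reference keyed (objectid, BTRFS_INODE_REF_KEY, ref_objectid), with the
 * name bytes stored inline after the btrfs_inode_ref header.
 */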
3517 static inline u8 btrfs_inode_type(struct inode *inode)
3519 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
3523 * utility function to add 'inode' into 'parent_inode' with
3524 * a given name and a given sequence number.
3525 * if 'add_backref' is true, also insert a backref from the
3526 * inode to the parent directory.
3528 int btrfs_add_link(struct btrfs_trans_handle *trans,
3529 struct inode *parent_inode, struct inode *inode,
3530 const char *name, int name_len, int add_backref, u64 index)
3533 struct btrfs_key key;
3534 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
3536 key.objectid = inode->i_ino;
3537 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
3540 ret = btrfs_insert_dir_item(trans, root, name, name_len,
3541 parent_inode->i_ino,
3542 &key, btrfs_inode_type(inode),
3546 ret = btrfs_insert_inode_ref(trans, root,
3549 parent_inode->i_ino,
3552 btrfs_i_size_write(parent_inode, parent_inode->i_size +
3554 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
3555 ret = btrfs_update_inode(trans, root, parent_inode);
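/*
 * Note on the size accounting above (added for clarity): linking a name
 * creates both a DIR_ITEM and a DIR_INDEX entry, so the parent directory's
 * i_size grows by name_len * 2; btrfs_unlink_inode() subtracts the same
 * amount when the name is removed.
 */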
3560 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
3561 struct dentry *dentry, struct inode *inode,
3562 int backref, u64 index)
3564 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3565 inode, dentry->d_name.name,
3566 dentry->d_name.len, backref, index);
3568 d_instantiate(dentry, inode);
3576 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3577 int mode, dev_t rdev)
3579 struct btrfs_trans_handle *trans;
3580 struct btrfs_root *root = BTRFS_I(dir)->root;
3581 struct inode *inode = NULL;
3585 unsigned long nr = 0;
3588 if (!new_valid_dev(rdev))
3591 err = btrfs_check_metadata_free_space(root);
3595 trans = btrfs_start_transaction(root, 1);
3596 btrfs_set_trans_block_group(trans, dir);
3598 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3604 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3606 dentry->d_parent->d_inode->i_ino, objectid,
3607 BTRFS_I(dir)->block_group, mode, &index);
3608 err = PTR_ERR(inode);
3612 err = btrfs_init_inode_security(inode, dir);
3618 btrfs_set_trans_block_group(trans, inode);
3619 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3623 inode->i_op = &btrfs_special_inode_operations;
3624 init_special_inode(inode, inode->i_mode, rdev);
3625 btrfs_update_inode(trans, root, inode);
3627 dir->i_sb->s_dirt = 1;
3628 btrfs_update_inode_block_group(trans, inode);
3629 btrfs_update_inode_block_group(trans, dir);
3631 nr = trans->blocks_used;
3632 btrfs_end_transaction_throttle(trans, root);
3635 inode_dec_link_count(inode);
3638 btrfs_btree_balance_dirty(root, nr);
3642 static int btrfs_create(struct inode *dir, struct dentry *dentry,
3643 int mode, struct nameidata *nd)
3645 struct btrfs_trans_handle *trans;
3646 struct btrfs_root *root = BTRFS_I(dir)->root;
3647 struct inode *inode = NULL;
3650 unsigned long nr = 0;
3654 err = btrfs_check_metadata_free_space(root);
3657 trans = btrfs_start_transaction(root, 1);
3658 btrfs_set_trans_block_group(trans, dir);
3660 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3666 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3668 dentry->d_parent->d_inode->i_ino,
3669 objectid, BTRFS_I(dir)->block_group, mode,
3671 err = PTR_ERR(inode);
3675 err = btrfs_init_inode_security(inode, dir);
3681 btrfs_set_trans_block_group(trans, inode);
3682 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3686 inode->i_mapping->a_ops = &btrfs_aops;
3687 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3688 inode->i_fop = &btrfs_file_operations;
3689 inode->i_op = &btrfs_file_inode_operations;
3690 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3692 dir->i_sb->s_dirt = 1;
3693 btrfs_update_inode_block_group(trans, inode);
3694 btrfs_update_inode_block_group(trans, dir);
3696 nr = trans->blocks_used;
3697 btrfs_end_transaction_throttle(trans, root);
3700 inode_dec_link_count(inode);
3703 btrfs_btree_balance_dirty(root, nr);
3707 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3708 struct dentry *dentry)
3710 struct btrfs_trans_handle *trans;
3711 struct btrfs_root *root = BTRFS_I(dir)->root;
3712 struct inode *inode = old_dentry->d_inode;
3714 unsigned long nr = 0;
3718 if (inode->i_nlink == 0)
3721 btrfs_inc_nlink(inode);
3722 err = btrfs_check_metadata_free_space(root);
3725 err = btrfs_set_inode_index(dir, &index);
3729 trans = btrfs_start_transaction(root, 1);
3731 btrfs_set_trans_block_group(trans, dir);
3732 atomic_inc(&inode->i_count);
3734 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
3739 dir->i_sb->s_dirt = 1;
3740 btrfs_update_inode_block_group(trans, dir);
3741 err = btrfs_update_inode(trans, root, inode);
3746 nr = trans->blocks_used;
3747 btrfs_end_transaction_throttle(trans, root);
3750 inode_dec_link_count(inode);
3753 btrfs_btree_balance_dirty(root, nr);
3757 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3759 struct inode *inode = NULL;
3760 struct btrfs_trans_handle *trans;
3761 struct btrfs_root *root = BTRFS_I(dir)->root;
3763 int drop_on_err = 0;
3766 unsigned long nr = 1;
3768 err = btrfs_check_metadata_free_space(root);
3772 trans = btrfs_start_transaction(root, 1);
3773 btrfs_set_trans_block_group(trans, dir);
3775 if (IS_ERR(trans)) {
3776 err = PTR_ERR(trans);
3780 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3786 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3788 dentry->d_parent->d_inode->i_ino, objectid,
3789 BTRFS_I(dir)->block_group, S_IFDIR | mode,
3791 if (IS_ERR(inode)) {
3792 err = PTR_ERR(inode);
3798 err = btrfs_init_inode_security(inode, dir);
3802 inode->i_op = &btrfs_dir_inode_operations;
3803 inode->i_fop = &btrfs_dir_file_operations;
3804 btrfs_set_trans_block_group(trans, inode);
3806 btrfs_i_size_write(inode, 0);
3807 err = btrfs_update_inode(trans, root, inode);
3811 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3812 inode, dentry->d_name.name,
3813 dentry->d_name.len, 0, index);
3817 d_instantiate(dentry, inode);
3819 dir->i_sb->s_dirt = 1;
3820 btrfs_update_inode_block_group(trans, inode);
3821 btrfs_update_inode_block_group(trans, dir);
3824 nr = trans->blocks_used;
3825 btrfs_end_transaction_throttle(trans, root);
3830 btrfs_btree_balance_dirty(root, nr);
3834 /* helper for btrfs_get_extent. Given an existing extent in the tree,
3835 * and an extent that you want to insert, deal with overlap and insert
3836 * the new extent into the tree.
3838 static int merge_extent_mapping(struct extent_map_tree *em_tree,
3839 struct extent_map *existing,
3840 struct extent_map *em,
3841 u64 map_start, u64 map_len)
3845 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
3846 start_diff = map_start - em->start;
3847 em->start = map_start;
3849 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
3850 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3851 em->block_start += start_diff;
3852 em->block_len -= start_diff;
3854 return add_extent_mapping(em_tree, em);
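/*
 * Editorial example (made-up numbers): if the new em spans file range
 * [0, 12k) at disk bytenr 100k but the tree already holds [0, 8k), the
 * caller passes map_start = 8k, so start_diff = 8k, em->start becomes 8k
 * and, for an uncompressed extent, em->block_start moves to 108k with
 * block_len shrunk accordingly.  Only the non-overlapping tail is inserted.
 */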
3857 static noinline int uncompress_inline(struct btrfs_path *path,
3858 struct inode *inode, struct page *page,
3859 size_t pg_offset, u64 extent_offset,
3860 struct btrfs_file_extent_item *item)
3863 struct extent_buffer *leaf = path->nodes[0];
3866 unsigned long inline_size;
3869 WARN_ON(pg_offset != 0);
3870 max_size = btrfs_file_extent_ram_bytes(leaf, item);
3871 inline_size = btrfs_file_extent_inline_item_len(leaf,
3872 btrfs_item_nr(leaf, path->slots[0]));
3873 tmp = kmalloc(inline_size, GFP_NOFS);
3874 ptr = btrfs_file_extent_inline_start(item);
3876 read_extent_buffer(leaf, tmp, ptr, inline_size);
3878 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
3879 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
3880 inline_size, max_size);
3882 char *kaddr = kmap_atomic(page, KM_USER0);
3883 unsigned long copy_size = min_t(u64,
3884 PAGE_CACHE_SIZE - pg_offset,
3885 max_size - extent_offset);
3886 memset(kaddr + pg_offset, 0, copy_size);
3887 kunmap_atomic(kaddr, KM_USER0);
3894 * a bit scary, this does extent mapping from logical file offset to the disk.
3895 * the ugly parts come from merging extents from the disk with the in-ram
3896 * representation. This gets more complex because of the data=ordered code,
3897 * where the in-ram extents might be locked pending data=ordered completion.
3899 * This also copies inline extents directly into the page.
3902 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
3903 size_t pg_offset, u64 start, u64 len,
3909 u64 extent_start = 0;
3911 u64 objectid = inode->i_ino;
3913 struct btrfs_path *path = NULL;
3914 struct btrfs_root *root = BTRFS_I(inode)->root;
3915 struct btrfs_file_extent_item *item;
3916 struct extent_buffer *leaf;
3917 struct btrfs_key found_key;
3918 struct extent_map *em = NULL;
3919 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3920 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3921 struct btrfs_trans_handle *trans = NULL;
3925 spin_lock(&em_tree->lock);
3926 em = lookup_extent_mapping(em_tree, start, len);
3928 em->bdev = root->fs_info->fs_devices->latest_bdev;
3929 spin_unlock(&em_tree->lock);
3932 if (em->start > start || em->start + em->len <= start)
3933 free_extent_map(em);
3934 else if (em->block_start == EXTENT_MAP_INLINE && page)
3935 free_extent_map(em);
3939 em = alloc_extent_map(GFP_NOFS);
3944 em->bdev = root->fs_info->fs_devices->latest_bdev;
3945 em->start = EXTENT_MAP_HOLE;
3946 em->orig_start = EXTENT_MAP_HOLE;
3948 em->block_len = (u64)-1;
3951 path = btrfs_alloc_path();
3955 ret = btrfs_lookup_file_extent(trans, root, path,
3956 objectid, start, trans != NULL);
3963 if (path->slots[0] == 0)
3968 leaf = path->nodes[0];
3969 item = btrfs_item_ptr(leaf, path->slots[0],
3970 struct btrfs_file_extent_item);
3971 /* are we inside the extent that was found? */
3972 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3973 found_type = btrfs_key_type(&found_key);
3974 if (found_key.objectid != objectid ||
3975 found_type != BTRFS_EXTENT_DATA_KEY) {
3979 found_type = btrfs_file_extent_type(leaf, item);
3980 extent_start = found_key.offset;
3981 compressed = btrfs_file_extent_compression(leaf, item);
3982 if (found_type == BTRFS_FILE_EXTENT_REG ||
3983 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
3984 extent_end = extent_start +
3985 btrfs_file_extent_num_bytes(leaf, item);
3986 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
3988 size = btrfs_file_extent_inline_len(leaf, item);
3989 extent_end = (extent_start + size + root->sectorsize - 1) &
3990 ~((u64)root->sectorsize - 1);
3993 if (start >= extent_end) {
3995 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3996 ret = btrfs_next_leaf(root, path);
4003 leaf = path->nodes[0];
4005 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4006 if (found_key.objectid != objectid ||
4007 found_key.type != BTRFS_EXTENT_DATA_KEY)
4009 if (start + len <= found_key.offset)
4012 em->len = found_key.offset - start;
4016 if (found_type == BTRFS_FILE_EXTENT_REG ||
4017 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4018 em->start = extent_start;
4019 em->len = extent_end - extent_start;
4020 em->orig_start = extent_start -
4021 btrfs_file_extent_offset(leaf, item);
4022 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4024 em->block_start = EXTENT_MAP_HOLE;
4028 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4029 em->block_start = bytenr;
4030 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4033 bytenr += btrfs_file_extent_offset(leaf, item);
4034 em->block_start = bytenr;
4035 em->block_len = em->len;
4036 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4037 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4040 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4044 size_t extent_offset;
4047 em->block_start = EXTENT_MAP_INLINE;
4048 if (!page || create) {
4049 em->start = extent_start;
4050 em->len = extent_end - extent_start;
4054 size = btrfs_file_extent_inline_len(leaf, item);
4055 extent_offset = page_offset(page) + pg_offset - extent_start;
4056 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4057 size - extent_offset);
4058 em->start = extent_start + extent_offset;
4059 em->len = (copy_size + root->sectorsize - 1) &
4060 ~((u64)root->sectorsize - 1);
4061 em->orig_start = EXTENT_MAP_INLINE;
4063 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4064 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4065 if (create == 0 && !PageUptodate(page)) {
4066 if (btrfs_file_extent_compression(leaf, item) ==
4067 BTRFS_COMPRESS_ZLIB) {
4068 ret = uncompress_inline(path, inode, page,
4070 extent_offset, item);
4074 read_extent_buffer(leaf, map + pg_offset, ptr,
4078 flush_dcache_page(page);
4079 } else if (create && PageUptodate(page)) {
4082 free_extent_map(em);
4084 btrfs_release_path(root, path);
4085 trans = btrfs_join_transaction(root, 1);
4089 write_extent_buffer(leaf, map + pg_offset, ptr,
4092 btrfs_mark_buffer_dirty(leaf);
4094 set_extent_uptodate(io_tree, em->start,
4095 extent_map_end(em) - 1, GFP_NOFS);
4098 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4105 em->block_start = EXTENT_MAP_HOLE;
4106 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4108 btrfs_release_path(root, path);
4109 if (em->start > start || extent_map_end(em) <= start) {
4110 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4111 "[%llu %llu]\n", (unsigned long long)em->start,
4112 (unsigned long long)em->len,
4113 (unsigned long long)start,
4114 (unsigned long long)len);
4120 spin_lock(&em_tree->lock);
4121 ret = add_extent_mapping(em_tree, em);
4122 /* it is possible that someone inserted the extent into the tree
4123 * while we had the lock dropped. It is also possible that
4124 * an overlapping map exists in the tree
4126 if (ret == -EEXIST) {
4127 struct extent_map *existing;
4131 existing = lookup_extent_mapping(em_tree, start, len);
4132 if (existing && (existing->start > start ||
4133 existing->start + existing->len <= start)) {
4134 free_extent_map(existing);
4138 existing = lookup_extent_mapping(em_tree, em->start,
4141 err = merge_extent_mapping(em_tree, existing,
4144 free_extent_map(existing);
4146 free_extent_map(em);
4151 free_extent_map(em);
4155 free_extent_map(em);
4160 spin_unlock(&em_tree->lock);
4163 btrfs_free_path(path);
4165 ret = btrfs_end_transaction(trans, root);
4170 free_extent_map(em);
4172 return ERR_PTR(err);
4177 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4178 const struct iovec *iov, loff_t offset,
4179 unsigned long nr_segs)
4184 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4185 __u64 start, __u64 len)
4187 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
4190 int btrfs_readpage(struct file *file, struct page *page)
4192 struct extent_io_tree *tree;
4193 tree = &BTRFS_I(page->mapping->host)->io_tree;
4194 return extent_read_full_page(tree, page, btrfs_get_extent);
4197 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4199 struct extent_io_tree *tree;
4202 if (current->flags & PF_MEMALLOC) {
4203 redirty_page_for_writepage(wbc, page);
4207 tree = &BTRFS_I(page->mapping->host)->io_tree;
4208 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4211 int btrfs_writepages(struct address_space *mapping,
4212 struct writeback_control *wbc)
4214 struct extent_io_tree *tree;
4216 tree = &BTRFS_I(mapping->host)->io_tree;
4217 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4221 btrfs_readpages(struct file *file, struct address_space *mapping,
4222 struct list_head *pages, unsigned nr_pages)
4224 struct extent_io_tree *tree;
4225 tree = &BTRFS_I(mapping->host)->io_tree;
4226 return extent_readpages(tree, mapping, pages, nr_pages,
4229 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4231 struct extent_io_tree *tree;
4232 struct extent_map_tree *map;
4235 tree = &BTRFS_I(page->mapping->host)->io_tree;
4236 map = &BTRFS_I(page->mapping->host)->extent_tree;
4237 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4239 ClearPagePrivate(page);
4240 set_page_private(page, 0);
4241 page_cache_release(page);
4246 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4248 if (PageWriteback(page) || PageDirty(page))
4250 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
4253 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4255 struct extent_io_tree *tree;
4256 struct btrfs_ordered_extent *ordered;
4257 u64 page_start = page_offset(page);
4258 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4260 wait_on_page_writeback(page);
4261 tree = &BTRFS_I(page->mapping->host)->io_tree;
4263 btrfs_releasepage(page, GFP_NOFS);
4267 lock_extent(tree, page_start, page_end, GFP_NOFS);
4268 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
4272 * IO on this page will never be started, so we need
4273 * to account for any ordered extents now
4275 clear_extent_bit(tree, page_start, page_end,
4276 EXTENT_DIRTY | EXTENT_DELALLOC |
4277 EXTENT_LOCKED, 1, 0, GFP_NOFS);
4278 btrfs_finish_ordered_io(page->mapping->host,
4279 page_start, page_end);
4280 btrfs_put_ordered_extent(ordered);
4281 lock_extent(tree, page_start, page_end, GFP_NOFS);
4283 clear_extent_bit(tree, page_start, page_end,
4284 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
4287 __btrfs_releasepage(page, GFP_NOFS);
4289 ClearPageChecked(page);
4290 if (PagePrivate(page)) {
4291 ClearPagePrivate(page);
4292 set_page_private(page, 0);
4293 page_cache_release(page);
4298 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
4299 * called from a page fault handler when a page is first dirtied. Hence we must
4300 * be careful to check for EOF conditions here. We set the page up correctly
4301 * for a written page which means we get ENOSPC checking when writing into
4302 * holes and correct delalloc and unwritten extent mapping on filesystems that
4303 * support these features.
4305 * We are not allowed to take the i_mutex here so we have to play games to
4306 * protect against truncate races as the page could now be beyond EOF. Because
4307 * vmtruncate() writes the inode size before removing pages, once we have the
4308 * page lock we can determine safely if the page is beyond EOF. If it is not
4309 * beyond EOF, then the page is guaranteed safe against truncation until we unlock the page.
4312 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
4314 struct inode *inode = fdentry(vma->vm_file)->d_inode;
4315 struct btrfs_root *root = BTRFS_I(inode)->root;
4316 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4317 struct btrfs_ordered_extent *ordered;
4319 unsigned long zero_start;
4325 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
4332 size = i_size_read(inode);
4333 page_start = page_offset(page);
4334 page_end = page_start + PAGE_CACHE_SIZE - 1;
4336 if ((page->mapping != inode->i_mapping) ||
4337 (page_start >= size)) {
4338 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4339 /* page got truncated out from underneath us */
4342 wait_on_page_writeback(page);
4344 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
4345 set_page_extent_mapped(page);
4348 * we can't set the delalloc bits if there are pending ordered
4349 * extents. Drop our locks and wait for them to finish
4351 ordered = btrfs_lookup_ordered_extent(inode, page_start);
4353 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4355 btrfs_start_ordered_extent(inode, ordered, 1);
4356 btrfs_put_ordered_extent(ordered);
4360 btrfs_set_extent_delalloc(inode, page_start, page_end);
4363 /* page is wholly or partially inside EOF */
4364 if (page_start + PAGE_CACHE_SIZE > size)
4365 zero_start = size & ~PAGE_CACHE_MASK;
4367 zero_start = PAGE_CACHE_SIZE;
4369 if (zero_start != PAGE_CACHE_SIZE) {
4371 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
4372 flush_dcache_page(page);
4375 ClearPageChecked(page);
4376 set_page_dirty(page);
4377 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
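/*
 * Editorial example: if i_size is 10000 and the faulting page covers
 * [8192, 12288), zero_start above is 10000 & ~PAGE_CACHE_MASK == 1808, so
 * bytes 1808-4095 of the page (file offsets 10000-12287, past EOF) are
 * zeroed before the page is marked dirty and delalloc.
 */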
4385 static void btrfs_truncate(struct inode *inode)
4387 struct btrfs_root *root = BTRFS_I(inode)->root;
4389 struct btrfs_trans_handle *trans;
4391 u64 mask = root->sectorsize - 1;
4393 if (!S_ISREG(inode->i_mode))
4395 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4398 btrfs_truncate_page(inode->i_mapping, inode->i_size);
4399 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
4401 trans = btrfs_start_transaction(root, 1);
4402 btrfs_set_trans_block_group(trans, inode);
4403 btrfs_i_size_write(inode, inode->i_size);
4405 ret = btrfs_orphan_add(trans, inode);
4408 /* FIXME, add redo link to tree so we don't leak on crash */
4409 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
4410 BTRFS_EXTENT_DATA_KEY);
4411 btrfs_update_inode(trans, root, inode);
4413 ret = btrfs_orphan_del(trans, inode);
4417 nr = trans->blocks_used;
4418 ret = btrfs_end_transaction_throttle(trans, root);
4420 btrfs_btree_balance_dirty(root, nr);
4424 * create a new subvolume directory/inode (helper for the ioctl).
4426 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
4427 struct btrfs_root *new_root, struct dentry *dentry,
4428 u64 new_dirid, u64 alloc_hint)
4430 struct inode *inode;
4434 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
4435 new_dirid, alloc_hint, S_IFDIR | 0700, &index);
4437 return PTR_ERR(inode);
4438 inode->i_op = &btrfs_dir_inode_operations;
4439 inode->i_fop = &btrfs_dir_file_operations;
4442 btrfs_i_size_write(inode, 0);
4444 error = btrfs_update_inode(trans, new_root, inode);
4448 d_instantiate(dentry, inode);
4452 /* helper function for file defrag and space balancing. This
4453 * forces readahead on a given range of bytes in an inode
4455 unsigned long btrfs_force_ra(struct address_space *mapping,
4456 struct file_ra_state *ra, struct file *file,
4457 pgoff_t offset, pgoff_t last_index)
4459 pgoff_t req_size = last_index - offset + 1;
4461 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
4462 return offset + req_size;
4465 struct inode *btrfs_alloc_inode(struct super_block *sb)
4467 struct btrfs_inode *ei;
4469 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
4473 ei->logged_trans = 0;
4474 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
4475 ei->i_acl = BTRFS_ACL_NOT_CACHED;
4476 ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
4477 INIT_LIST_HEAD(&ei->i_orphan);
4478 return &ei->vfs_inode;
4481 void btrfs_destroy_inode(struct inode *inode)
4483 struct btrfs_ordered_extent *ordered;
4484 WARN_ON(!list_empty(&inode->i_dentry));
4485 WARN_ON(inode->i_data.nrpages);
4487 if (BTRFS_I(inode)->i_acl &&
4488 BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
4489 posix_acl_release(BTRFS_I(inode)->i_acl);
4490 if (BTRFS_I(inode)->i_default_acl &&
4491 BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
4492 posix_acl_release(BTRFS_I(inode)->i_default_acl);
4494 spin_lock(&BTRFS_I(inode)->root->list_lock);
4495 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
4496 printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan"
4497 " list\n", inode->i_ino);
4500 spin_unlock(&BTRFS_I(inode)->root->list_lock);
4503 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
4507 printk(KERN_ERR "btrfs found ordered "
4508 "extent %llu %llu on inode cleanup\n",
4509 (unsigned long long)ordered->file_offset,
4510 (unsigned long long)ordered->len);
4511 btrfs_remove_ordered_extent(inode, ordered);
4512 btrfs_put_ordered_extent(ordered);
4513 btrfs_put_ordered_extent(ordered);
4516 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
4517 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
4520 static void init_once(void *foo)
4522 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
4524 inode_init_once(&ei->vfs_inode);
4527 void btrfs_destroy_cachep(void)
4529 if (btrfs_inode_cachep)
4530 kmem_cache_destroy(btrfs_inode_cachep);
4531 if (btrfs_trans_handle_cachep)
4532 kmem_cache_destroy(btrfs_trans_handle_cachep);
4533 if (btrfs_transaction_cachep)
4534 kmem_cache_destroy(btrfs_transaction_cachep);
4535 if (btrfs_bit_radix_cachep)
4536 kmem_cache_destroy(btrfs_bit_radix_cachep);
4537 if (btrfs_path_cachep)
4538 kmem_cache_destroy(btrfs_path_cachep);
4541 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
4542 unsigned long extra_flags,
4543 void (*ctor)(void *))
4545 return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
4546 SLAB_MEM_SPREAD | extra_flags), ctor);
4549 int btrfs_init_cachep(void)
4551 btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
4552 sizeof(struct btrfs_inode),
4554 if (!btrfs_inode_cachep)
4556 btrfs_trans_handle_cachep =
4557 btrfs_cache_create("btrfs_trans_handle_cache",
4558 sizeof(struct btrfs_trans_handle),
4560 if (!btrfs_trans_handle_cachep)
4562 btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
4563 sizeof(struct btrfs_transaction),
4565 if (!btrfs_transaction_cachep)
4567 btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
4568 sizeof(struct btrfs_path),
4570 if (!btrfs_path_cachep)
4572 btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
4573 SLAB_DESTROY_BY_RCU, NULL);
4574 if (!btrfs_bit_radix_cachep)
4578 btrfs_destroy_cachep();
4582 static int btrfs_getattr(struct vfsmount *mnt,
4583 struct dentry *dentry, struct kstat *stat)
4585 struct inode *inode = dentry->d_inode;
4586 generic_fillattr(inode, stat);
4587 stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
4588 stat->blksize = PAGE_CACHE_SIZE;
4589 stat->blocks = (inode_get_bytes(inode) +
4590 BTRFS_I(inode)->delalloc_bytes) >> 9;
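/*
 * Note (added for clarity): stat->blocks is reported in 512-byte units
 * (hence the >> 9) and includes delalloc_bytes, so data sitting dirty in
 * the page cache but not yet allocated on disk is still reflected in stat
 * output.
 */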
4594 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4595 struct inode *new_dir, struct dentry *new_dentry)
4597 struct btrfs_trans_handle *trans;
4598 struct btrfs_root *root = BTRFS_I(old_dir)->root;
4599 struct inode *new_inode = new_dentry->d_inode;
4600 struct inode *old_inode = old_dentry->d_inode;
4601 struct timespec ctime = CURRENT_TIME;
4605 /* we're not allowed to rename between subvolumes */
4606 if (BTRFS_I(old_inode)->root->root_key.objectid !=
4607 BTRFS_I(new_dir)->root->root_key.objectid)
4610 if (S_ISDIR(old_inode->i_mode) && new_inode &&
4611 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
4615 /* to rename a snapshot or subvolume, we need to juggle the
4616 * backrefs. This isn't coded yet
4618 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
4621 ret = btrfs_check_metadata_free_space(root);
4625 trans = btrfs_start_transaction(root, 1);
4627 btrfs_set_trans_block_group(trans, new_dir);
4629 btrfs_inc_nlink(old_dentry->d_inode);
4630 old_dir->i_ctime = old_dir->i_mtime = ctime;
4631 new_dir->i_ctime = new_dir->i_mtime = ctime;
4632 old_inode->i_ctime = ctime;
4634 ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
4635 old_dentry->d_name.name,
4636 old_dentry->d_name.len);
4641 new_inode->i_ctime = CURRENT_TIME;
4642 ret = btrfs_unlink_inode(trans, root, new_dir,
4643 new_dentry->d_inode,
4644 new_dentry->d_name.name,
4645 new_dentry->d_name.len);
4648 if (new_inode->i_nlink == 0) {
4649 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
4655 ret = btrfs_set_inode_index(new_dir, &index);
4659 ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
4660 old_inode, new_dentry->d_name.name,
4661 new_dentry->d_name.len, 1, index);
4666 btrfs_end_transaction_throttle(trans, root);

/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
int btrfs_start_delalloc_inodes(struct btrfs_root *root)
{
	struct list_head *head = &root->fs_info->delalloc_inodes;
	struct btrfs_inode *binode;
	struct inode *inode;

	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return -EROFS;

	spin_lock(&root->fs_info->delalloc_lock);
	while (!list_empty(head)) {
		binode = list_entry(head->next, struct btrfs_inode,
				    delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (!inode)
			list_del_init(&binode->delalloc_inodes);
		spin_unlock(&root->fs_info->delalloc_lock);
		if (inode) {
			filemap_flush(inode->i_mapping);
			iput(inode);
		}
		cond_resched();
		spin_lock(&root->fs_info->delalloc_lock);
	}
	spin_unlock(&root->fs_info->delalloc_lock);

	/* the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&root->fs_info->async_submit_draining);
	while (atomic_read(&root->fs_info->nr_async_submits) ||
	       atomic_read(&root->fs_info->async_delalloc_pages)) {
		wait_event(root->fs_info->async_submit_wait,
			   (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
			    atomic_read(&root->fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&root->fs_info->async_submit_draining);
	return 0;
}
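
/*
 * create a symlink.  The target string is stored as an inline file extent
 * in the btree, so its length is limited to what fits in a single leaf
 * (BTRFS_MAX_INLINE_DATA_SIZE).
 */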
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;
	unsigned long nr = 0;

	name_len = strlen(symname) + 1;
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
		return -ENAMETOOLONG;

	err = btrfs_check_metadata_free_space(root);
	if (err)
		goto out_fail;

	trans = btrfs_start_transaction(root, 1);
	btrfs_set_trans_block_group(trans, dir);

	err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
	if (err) {
		err = -ENOSPC;
		goto out_unlock;
	}

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len,
				dentry->d_parent->d_inode->i_ino, objectid,
				BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
				&index);
	err = PTR_ERR(inode);
	if (IS_ERR(inode))
		goto out_unlock;

	err = btrfs_init_inode_security(inode, dir);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}

	btrfs_set_trans_block_group(trans, inode);
	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
	if (err)
		drop_inode = 1;
	else {
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	}
	dir->i_sb->s_dirt = 1;
	btrfs_update_inode_block_group(trans, inode);
	btrfs_update_inode_block_group(trans, dir);
	if (drop_inode)
		goto out_unlock;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = inode->i_ino;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		drop_inode = 1;
		goto out_unlock;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	inode->i_op = &btrfs_symlink_inode_operations;
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(inode, name_len - 1);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		drop_inode = 1;

out_unlock:
	nr = trans->blocks_used;
	btrfs_end_transaction_throttle(trans, root);
out_fail:
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(root, nr);
	return err;
}
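
/*
 * helper for fallocate: allocate PREALLOC file extents covering
 * [start, end) inside a joined transaction.  i_size is only pushed out
 * when FALLOC_FL_KEEP_SIZE is not set.
 */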
static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
			       u64 alloc_hint, int mode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 alloc_size;
	u64 cur_offset = start;
	u64 num_bytes = end - start;
	int ret = 0;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);
	btrfs_set_trans_block_group(trans, inode);

	while (num_bytes > 0) {
		alloc_size = min(num_bytes, root->fs_info->max_extent);
		ret = btrfs_reserve_extent(trans, root, alloc_size,
					   root->sectorsize, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		if (ret) {
			WARN_ON(1);
			goto out;
		}
		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, 0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		BUG_ON(ret);
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		alloc_hint = ins.objectid + ins.offset;
	}
out:
	if (cur_offset > start) {
		inode->i_ctime = CURRENT_TIME;
		btrfs_set_flag(inode, PREALLOC);
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    cur_offset > i_size_read(inode))
			btrfs_i_size_write(inode, cur_offset);
		ret = btrfs_update_inode(trans, root, inode);
		BUG_ON(ret);
	}

	btrfs_end_transaction(trans, root);
	return ret;
}
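
/*
 * preallocate space for a file.  The requested range is rounded out to
 * sector boundaries (e.g. with a 4K sectorsize, offset 5000 and len 3000
 * become alloc_start 4096 and alloc_end 8192), any ordered extents in the
 * range are waited on, and the holes that remain are filled with PREALLOC
 * extents via prealloc_file_range().
 */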
static long btrfs_fallocate(struct inode *inode, int mode,
			    loff_t offset, loff_t len)
{
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	mutex_lock(&inode->i_mutex);
	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, alloc_start);
		if (ret)
			goto out;
	}
	while (1) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree, alloc_start,
			    alloc_end - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      alloc_start, alloc_end - 1, GFP_NOFS);
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		BUG_ON(IS_ERR(em) || !em);
		last_byte = min(extent_map_end(em), alloc_end);
		last_byte = (last_byte + mask) & ~mask;
		if (em->block_start == EXTENT_MAP_HOLE) {
			ret = prealloc_file_range(inode, cur_offset,
					last_byte, alloc_hint, mode);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		}
		if (em->block_start <= EXTENT_MAP_LAST_BYTE)
			alloc_hint = em->block_start;
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, alloc_end - 1,
		      GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
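
/*
 * btrfs doesn't attach buffer heads to data pages, so dirtying a page is
 * just the plain nobuffers variant.
 */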
static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}
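
/*
 * deny write access when the per-inode READONLY flag is set, otherwise
 * fall back to generic_permission() with the btrfs ACL checker.
 */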
static int btrfs_permission(struct inode *inode, int mask)
{
	if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
		return -EACCES;
	return generic_permission(inode, mask, btrfs_check_acl);
}

static struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
};

static struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
};

static struct file_operations btrfs_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= btrfs_real_readdir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};

static struct extent_io_ops btrfs_extent_io_ops = {
	.fill_delalloc = run_delalloc_range,
	.submit_bio_hook = btrfs_submit_bio_hook,
	.merge_bio_hook = btrfs_merge_bio_hook,
	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
	.writepage_start_hook = btrfs_writepage_start_hook,
	.readpage_io_failed_hook = btrfs_io_failed_hook,
	.set_bit_hook = btrfs_set_bit_hook,
	.clear_bit_hook = btrfs_clear_bit_hook,
};

/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file.  They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen.  So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.sync_page	= block_sync_page,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
};

static struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};

static struct inode_operations btrfs_file_inode_operations = {
	.truncate	= btrfs_truncate,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
	.permission	= btrfs_permission,
	.fallocate	= btrfs_fallocate,
	.fiemap		= btrfs_fiemap,
};

static struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};

static struct inode_operations btrfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.permission	= btrfs_permission,
	.setxattr	= btrfs_setxattr,
	.getxattr	= btrfs_getxattr,
	.listxattr	= btrfs_listxattr,
	.removexattr	= btrfs_removexattr,
};