/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/version.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "ordered-data.h"
#include "xattr.h"
#include "compat.h"
#include "tree-log.h"
#include "ref-cache.h"
#include "compression.h"

struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};

static struct inode_operations btrfs_dir_inode_operations;
static struct inode_operations btrfs_symlink_inode_operations;
static struct inode_operations btrfs_dir_ro_inode_operations;
static struct inode_operations btrfs_special_inode_operations;
static struct inode_operations btrfs_file_inode_operations;
static struct address_space_operations btrfs_aops;
static struct address_space_operations btrfs_symlink_aops;
static struct file_operations btrfs_dir_file_operations;
static struct extent_io_ops btrfs_extent_io_ops;

static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_bit_radix_cachep;
struct kmem_cache *btrfs_path_cachep;
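
/*
 * on-disk directory entries store the file type in a single byte; this
 * table maps the S_IFMT bits of an in-memory i_mode down to those
 * BTRFS_FT_* values (shifting by S_SHIFT drops the permission bits first)
 */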
#define S_SHIFT 12
static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};

static void btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);

/*
 * a very lame attempt at stopping writes when the FS is 85% full.  There
 * are countless ways this is incorrect, but it is better than nothing.
 */
int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
			   int for_del)
{
	u64 total;
	u64 used;
	u64 thresh;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
	total = btrfs_super_total_bytes(&root->fs_info->super_copy);
	used = btrfs_super_bytes_used(&root->fs_info->super_copy);
	if (for_del)
		thresh = total * 90;
	else
		thresh = total * 85;
	do_div(thresh, 100);

	if (used + root->fs_info->delalloc_bytes + num_required > thresh)
		ret = -ENOSPC;
	spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
	return ret;
}

/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int noinline insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				struct page **compressed_pages)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int err = 0;
	int ret;
	size_t cur_size = size;
	size_t datasize;
	unsigned long offset;
	int use_compress = 0;

	if (compressed_size && compressed_pages) {
		use_compress = 1;
		cur_size = compressed_size;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_set_trans_block_group(trans, inode);

	key.objectid = inode->i_ino;
	key.offset = start;
	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
	inode_add_bytes(inode, size);
	datasize = btrfs_file_extent_calc_inline_size(cur_size);

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (ret) {
		err = ret;
		printk("got bad ret %d\n", ret);
		goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (use_compress) {
		struct page *cpage;
		int i = 0;
		while(compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min(compressed_size,
				       PAGE_CACHE_SIZE);

			kaddr = kmap(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap(cpage);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  BTRFS_COMPRESS_ZLIB);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_CACHE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page, KM_USER0);
		offset = start & (PAGE_CACHE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr, KM_USER0);
		page_cache_release(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	BTRFS_I(inode)->disk_i_size = inode->i_size;
	btrfs_update_inode(trans, root, inode);
	return 0;
fail:
	btrfs_free_path(path);
	return err;
}

/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static int cow_file_range_inline(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct inode *inode, u64 start, u64 end,
				 size_t compressed_size,
				 struct page **compressed_pages)
{
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = (end + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
	u64 hint_byte;
	u64 data_len = inline_len;
	int ret;

	if (compressed_size)
		data_len = compressed_size;
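
	/*
	 * inline extents are only worth it for small data that starts at
	 * file offset zero and fits within a single page and a single
	 * leaf; anything over the mount-time max_inline cap is rejected
	 * here (return 1) and goes through normal extent allocation
	 */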
	if (start > 0 ||
	    actual_end >= PAGE_CACHE_SIZE ||
	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    (!compressed_size &&
	    (actual_end & (root->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > root->fs_info->max_inline) {
		return 1;
	}

	ret = btrfs_drop_extents(trans, root, inode, start,
				 aligned_end, start, &hint_byte);
	BUG_ON(ret);

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, root, inode, start,
				   inline_len, compressed_size,
				   compressed_pages);
	BUG_ON(ret);
	btrfs_drop_extent_cache(inode, start, aligned_end, 0);
	return 0;
}

/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static int cow_file_range(struct inode *inode, struct page *locked_page,
			  u64 start, u64 end, int *page_started)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = root->sectorsize;
	u64 actual_end;
	struct btrfs_key ins;
	struct extent_map *em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	int ret = 0;
	int ordered_type;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long nr_pages_ret = 0;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned long max_compressed = 128 * 1024;
	unsigned long max_uncompressed = 256 * 1024;
	int i;
	int will_compress;

	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);
	btrfs_set_trans_block_group(trans, inode);

	/*
	 * compression made this loop a bit ugly, but the basic idea is to
	 * compress some pages but keep the total size of the compressed
	 * extent relatively small.  If compression is off, this goto target
	 * is never used.
	 */
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
	nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);

	actual_end = min_t(u64, i_size_read(inode), end + 1);
	total_compressed = actual_end - start;

	/* we want to make sure that amount of ram required to uncompress
	 * an extent is reasonable, so we limit the total size in ram
	 * of a compressed extent to 256k
	 */
	total_compressed = min(total_compressed, max_uncompressed);
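
	/*
	 * round the byte count up to a multiple of the block size; even a
	 * single dirty byte consumes at least one full block on disk
	 */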
	num_bytes = (end - start + blocksize) & ~(blocksize - 1);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;
	total_in = 0;
	ret = 0;

	/* we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress
	 */
	if (!btrfs_test_flag(inode, NOCOMPRESS) &&
	    btrfs_test_opt(root, COMPRESS)) {
		WARN_ON(pages);
		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);

		/* we want to make sure the amount of IO required to satisfy
		 * a random read is reasonably small, so we limit the size
		 * of a compressed extent to 128k
		 */
		ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
						total_compressed, pages,
						nr_pages, &nr_pages_ret,
						&total_in,
						&total_compressed,
						max_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_CACHE_SIZE - 1);
			struct page *page = pages[nr_pages_ret - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page, KM_USER0);
				memset(kaddr + offset, 0,
				       PAGE_CACHE_SIZE - offset);
				kunmap_atomic(kaddr, KM_USER0);
			}
			will_compress = 1;
		}
	}
	if (start == 0) {
		/* let's try to make an inline extent */
		if (ret || total_in < (end - start + 1)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.  This
			 * is almost sure to fail, but maybe inline sizes
			 * will get bigger later
			 */
			ret = cow_file_range_inline(trans, root, inode,
						    start, end, 0, NULL);
		} else {
			ret = cow_file_range_inline(trans, root, inode,
						    start, end,
						    total_compressed, pages);
		}
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode,
						     &BTRFS_I(inode)->io_tree,
						     start, end, NULL,
						     1, 1, 1);
			*page_started = 1;
			ret = 0;
			goto free_pages_out;
		}
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, round the compressed
		 * size up to a block size boundary so the allocator does
		 * sane things
		 */
		total_compressed = (total_compressed + blocksize - 1) &
			~(blocksize - 1);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = (total_in + PAGE_CACHE_SIZE - 1) &
			~(PAGE_CACHE_SIZE - 1);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			disk_num_bytes = total_compressed;
			num_bytes = total_in;
		}
	}

	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages_ret; i++) {
			WARN_ON(pages[i]->mapping);
			page_cache_release(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages_ret = 0;

		/* flag the file so we don't compress in the future */
		btrfs_set_flag(inode, NOCOMPRESS);
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(&root->fs_info->super_copy));

	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);

	while(disk_num_bytes > 0) {
		unsigned long min_bytes;

		/*
		 * the max size of a compressed extent is pretty small,
		 * make the code a little less complex by forcing
		 * the allocator to find a whole compressed extent at once
		 */
		if (will_compress)
			min_bytes = disk_num_bytes;
		else
			min_bytes = root->sectorsize;

		cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
		ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
					   min_bytes, 0, alloc_hint,
					   (u64)-1, &ins, 1);
		if (ret) {
			WARN_ON(1);
			goto free_pages_out_fail;
		}

		em = alloc_extent_map(GFP_NOFS);
		em->start = start;

		if (will_compress) {
			ram_size = num_bytes;
			em->len = num_bytes;
		} else {
			/* ramsize == disk size */
			ram_size = ins.offset;
			em->len = ins.offset;
		}

		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->bdev = root->fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PINNED, &em->flags);

		if (will_compress)
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
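
		/*
		 * insert the new mapping; if a stale mapping overlaps,
		 * drop the cached range and retry until ours goes in
		 */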
		while(1) {
			spin_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em);
			spin_unlock(&em_tree->lock);
			if (ret != -EEXIST) {
				free_extent_map(em);
				break;
			}
			btrfs_drop_extent_cache(inode, start,
						start + ram_size - 1, 0);
		}

		cur_alloc_size = ins.offset;
		ordered_type = will_compress ? BTRFS_ORDERED_COMPRESSED : 0;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size,
					       ordered_type);
		BUG_ON(ret);

		if (disk_num_bytes < cur_alloc_size) {
			printk("num_bytes %Lu cur_alloc %Lu\n", disk_num_bytes,
			       cur_alloc_size);
			break;
		}

		if (will_compress) {
			/*
			 * we're doing compression and we need to
			 * submit the compressed extents down to the device.
			 *
			 * We lock down all the file pages, clearing their
			 * dirty bits and setting them writeback.  Everyone
			 * that wants to modify the page will wait on the
			 * ordered extent above.
			 *
			 * The writeback bits on the file pages are
			 * cleared when the compressed pages are on disk
			 */
			btrfs_end_transaction(trans, root);

			if (start <= page_offset(locked_page) &&
			    page_offset(locked_page) < start + ram_size) {
				*page_started = 1;
			}

			extent_clear_unlock_delalloc(inode,
						     &BTRFS_I(inode)->io_tree,
						     start,
						     start + ram_size - 1,
						     NULL, 1, 1, 0);

			ret = btrfs_submit_compressed_write(inode, start,
						 ram_size, ins.objectid,
						 cur_alloc_size, pages,
						 nr_pages_ret);
			BUG_ON(ret);

			trans = btrfs_join_transaction(root, 1);
			if (start + ram_size < end) {
				start += ram_size;
				alloc_hint = ins.objectid + ins.offset;
				/* pages will be freed at end_bio time */
				pages = NULL;
				goto again;
			} else {
				/* we've written everything, time to go */
				break;
			}
		}

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 */
		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					     start, start + ram_size - 1,
					     locked_page, 0, 0, 0);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
	ret = 0;
out:
	btrfs_end_transaction(trans, root);
	return ret;

free_pages_out_fail:
	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
				     start, end, locked_page, 0, 0, 0);
free_pages_out:
	for (i = 0; i < nr_pages_ret; i++) {
		WARN_ON(pages[i]->mapping);
		page_cache_release(pages[i]);
	}
	if (pages)
		kfree(pages);
	goto out;
}

/*
 * when nocow writeback calls back.  This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static int run_delalloc_nocow(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 disk_bytenr;
	u64 num_bytes;
	int extent_type;
	int type;
	int nocow;
	int check_prev = 1;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	trans = btrfs_join_transaction(root, 1);
	BUG_ON(!trans);
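
	/*
	 * walk the file extent items covering [start, end].  Ranges that
	 * can safely be written in place are marked nocow; everything else
	 * is accumulated into [cow_start, extent_end) and handed off to
	 * cow_file_range()
	 */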
	cow_start = (u64)-1;
	cur_offset = start;
	while(1) {
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       cur_offset, 0);
		BUG_ON(ret < 0);
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == inode->i_ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			BUG_ON(ret < 0);
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > inode->i_ino ||
		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct btrfs_block_group_cache *block_group;
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (disk_bytenr == 0)
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_cross_ref_exist(trans, root, disk_bytenr))
				goto out_check;
			block_group = btrfs_lookup_block_group(root->fs_info,
							       disk_bytenr);
			if (!block_group || block_group->ro)
				goto out_check;
			disk_bytenr += btrfs_file_extent_offset(leaf, fi);
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
			extent_end = ALIGN(extent_end, root->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(root, path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page, cow_start,
					     found_key.offset - 1,
					     page_started);
			BUG_ON(ret);
			cow_start = (u64)-1;
		}

		disk_bytenr += cur_offset - found_key.offset;
		num_bytes = min(end + 1, extent_end) - cur_offset;
		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct extent_map *em;
			struct extent_map_tree *em_tree;
			em_tree = &BTRFS_I(inode)->extent_tree;
			em = alloc_extent_map(GFP_NOFS);
			em->start = cur_offset;
			em->len = num_bytes;
			em->block_len = num_bytes;
			em->block_start = disk_bytenr;
			em->bdev = root->fs_info->fs_devices->latest_bdev;
			set_bit(EXTENT_FLAG_PINNED, &em->flags);
			while(1) {
				spin_lock(&em_tree->lock);
				ret = add_extent_mapping(em_tree, em);
				spin_unlock(&em_tree->lock);
				if (ret != -EEXIST) {
					free_extent_map(em);
					break;
				}
				btrfs_drop_extent_cache(inode, em->start,
						em->start + em->len - 1, 0);
			}
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		BUG_ON(ret);

		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
					cur_offset, cur_offset + num_bytes - 1,
					locked_page, 0, 0, 0);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(root, path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;
	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end,
				     page_started);
		BUG_ON(ret);
	}

	ret = btrfs_end_transaction(trans, root);
	BUG_ON(ret);
	btrfs_free_path(path);
	return 0;
}

/*
 * extent_io.c call back to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	if (btrfs_test_opt(root, NODATACOW) ||
	    btrfs_test_flag(inode, NODATACOW))
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1);
	else if (btrfs_test_flag(inode, PREALLOC))
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0);
	else
		ret = cow_file_range(inode, locked_page, start, end,
				     page_started);
	return ret;
}

/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
		       unsigned long old, unsigned long bits)
{
	unsigned long flags;

	if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;

		spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
		BTRFS_I(inode)->delalloc_bytes += end - start + 1;
		root->fs_info->delalloc_bytes += end - start + 1;
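		/*
		 * first delalloc byte for this inode: add it to the per-fs
		 * list so writeback can find inodes with pending delalloc
		 */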
		if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
				      &root->fs_info->delalloc_inodes);
		}
		spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
	}
	return 0;
}

/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
			 unsigned long old, unsigned long bits)
{
	if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		unsigned long flags;

		spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
		if (end - start + 1 > root->fs_info->delalloc_bytes) {
			printk("warning: delalloc account %Lu %Lu\n",
			       end - start + 1, root->fs_info->delalloc_bytes);
			root->fs_info->delalloc_bytes = 0;
			BTRFS_I(inode)->delalloc_bytes = 0;
		} else {
			root->fs_info->delalloc_bytes -= end - start + 1;
			BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
		}
		if (BTRFS_I(inode)->delalloc_bytes == 0 &&
		    !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
			list_del_init(&BTRFS_I(inode)->delalloc_inodes);
		}
		spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
	}
	return 0;
}

/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct btrfs_mapping_tree *map_tree;
	u64 logical = (u64)bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;
	ret = btrfs_map_block(map_tree, READ, logical,
			      &map_length, NULL, 0);
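
	/*
	 * map_length comes back as the number of bytes that map
	 * contiguously; if the bio plus the new page would cross a
	 * stripe boundary, tell the caller to start a new bio
	 */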
	if (map_length < length + size)
		return 1;
	return 0;
}

/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.  All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached to the ordered extent record
 * are inserted into the btree
 */
int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
			    int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	ret = btrfs_csum_one_bio(root, inode, bio);
	BUG_ON(ret);
	return btrfs_map_bio(root, rw, bio, mirror_num, 1);
}

/*
 * extent_io.c submission hook.  This does the right thing for csum
 * calculation on write, or reading the csums from the tree before a read
 */
int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
			  int mirror_num, unsigned long bio_flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	int skip_sum;

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
	BUG_ON(ret);

	skip_sum = btrfs_test_opt(root, NODATASUM) ||
		btrfs_test_flag(inode, NODATASUM);

	if (!(rw & (1 << BIO_RW))) {
		if (!skip_sum)
			btrfs_lookup_bio_sums(root, inode, bio);

		if (bio_flags & EXTENT_BIO_COMPRESSED)
			return btrfs_submit_compressed_read(inode, bio,
						    mirror_num, bio_flags);
		goto mapit;
	} else if (!skip_sum) {
		/* we're doing a write, do the async checksumming */
		return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
				   inode, rw, bio, mirror_num,
				   bio_flags, __btrfs_submit_bio_hook);
	}

mapit:
	return btrfs_map_bio(root, rw, bio, mirror_num, 0);
}

/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct inode *inode, u64 file_offset,
			     struct list_head *list)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	btrfs_set_trans_block_group(trans, inode);
	list_for_each(cur, list) {
		sum = list_entry(cur, struct btrfs_ordered_sum, list);
		btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root,
				       inode, sum);
	}
	return 0;
}

int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
{
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   GFP_NOFS);
}

/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};

void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;

	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);

	/* already ordered? We're done */
	if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
			   EXTENT_ORDERED, 0)) {
		goto out;
	}

	ordered = btrfs_lookup_ordered_extent(inode, page_start);
	if (ordered) {
		unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
			      page_end, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		goto again;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end);
	ClearPageChecked(page);
out:
	unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
out_page:
	unlock_page(page);
	page_cache_release(page);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
			     EXTENT_ORDERED, 0);
	if (ret)
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	page_cache_get(page);
	fixup->work.func = btrfs_writepage_fixup_worker;
	fixup->page = page;
	btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
	return -EAGAIN;
}

static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 hint;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	ret = btrfs_drop_extents(trans, root, inode, file_pos,
				 file_pos + num_bytes, file_pos, &hint);
	BUG_ON(ret);

	ins.objectid = inode->i_ino;
	ins.offset = file_pos;
	ins.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
	BUG_ON(ret);
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
	btrfs_mark_buffer_dirty(leaf);

	inode_add_bytes(inode, num_bytes);
	btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_extent(trans, root, leaf->start,
					  root->root_key.objectid,
					  trans->transid, inode->i_ino, &ins);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}

/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_ordered_extent *ordered_extent;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int compressed = 0;
	int ret;

	ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
	if (!ret)
		return 0;

	trans = btrfs_join_transaction(root, 1);

	ordered_extent = btrfs_lookup_ordered_extent(inode, start);
	BUG_ON(!ordered_extent);
	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
		goto nocow;

	lock_extent(io_tree, ordered_extent->file_offset,
		    ordered_extent->file_offset + ordered_extent->len - 1,
		    GFP_NOFS);

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compressed = 1;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compressed);
		ret = btrfs_mark_extent_written(trans, root, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						ordered_extent->len);
		BUG_ON(ret);
	} else {
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						ordered_extent->len,
						ordered_extent->len,
						compressed, 0, 0,
						BTRFS_FILE_EXTENT_REG);
		BUG_ON(ret);
	}
	unlock_extent(io_tree, ordered_extent->file_offset,
		      ordered_extent->file_offset + ordered_extent->len - 1,
		      GFP_NOFS);
nocow:
	add_pending_csums(trans, inode, ordered_extent->file_offset,
			  &ordered_extent->list);

	mutex_lock(&BTRFS_I(inode)->extent_mutex);
	btrfs_ordered_update_i_size(inode, ordered_extent);
	btrfs_update_inode(trans, root, inode);
	btrfs_remove_ordered_extent(inode, ordered_extent);
	mutex_unlock(&BTRFS_I(inode)->extent_mutex);
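
	/* once for us */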
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	btrfs_end_transaction(trans, root);
	return 0;
}

int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				struct extent_state *state, int uptodate)
{
	return btrfs_finish_ordered_io(page->mapping->host, start, end);
}

/*
 * When IO fails, either with EIO or csum verification fails, we
 * try other mirrors that might have a good copy of the data.  This
 * io_failure_record is used to record state as we go through all the
 * mirrors.  If another mirror has good data, the page is set up to date
 * and things continue.  If a good mirror can't be found, the original
 * bio end_io callback is called to indicate things have failed.
 */
struct io_failure_record {
	struct page *page;
	u64 start;
	u64 len;
	u64 logical;
	unsigned long bio_flags;
	int last_mirror;
};

int btrfs_io_failed_hook(struct bio *failed_bio,
			 struct page *page, u64 start, u64 end,
			 struct extent_state *state)
{
	struct io_failure_record *failrec = NULL;
	u64 private;
	struct extent_map *em;
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct bio *bio;
	int num_copies;
	int ret;
	int rw;
	u64 logical;
	unsigned long bio_flags = 0;

	ret = get_state_private(failure_tree, start, &private);
	if (ret) {
		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
		if (!failrec)
			return -ENOMEM;
		failrec->start = start;
		failrec->len = end - start + 1;
		failrec->last_mirror = 0;

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, failrec->len);
		if (em->start > start || em->start + em->len < start) {
			free_extent_map(em);
			em = NULL;
		}
		spin_unlock(&em_tree->lock);

		if (!em || IS_ERR(em)) {
			kfree(failrec);
			return -EIO;
		}
		logical = start - em->start;
		logical = em->block_start + logical;
		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
			bio_flags = EXTENT_BIO_COMPRESSED;
		failrec->logical = logical;
		failrec->bio_flags = bio_flags;
		free_extent_map(em);
		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
				EXTENT_DIRTY, GFP_NOFS);
		set_state_private(failure_tree, start,
				  (u64)(unsigned long)failrec);
	} else {
		failrec = (struct io_failure_record *)(unsigned long)private;
	}
	num_copies = btrfs_num_copies(
			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
			      failrec->logical, failrec->len);
	failrec->last_mirror++;
	if (!state) {
		spin_lock_irq(&BTRFS_I(inode)->io_tree.lock);
		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
						    failrec->start,
						    EXTENT_LOCKED);
		if (state && state->start != failrec->start)
			state = NULL;
		spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock);
	}
	if (!state || failrec->last_mirror > num_copies) {
		set_state_private(failure_tree, failrec->start, 0);
		clear_extent_bits(failure_tree, failrec->start,
				  failrec->start + failrec->len - 1,
				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
		kfree(failrec);
		return -EIO;
	}
	bio = bio_alloc(GFP_NOFS, 1);
	bio->bi_private = state;
	bio->bi_end_io = failed_bio->bi_end_io;
	bio->bi_sector = failrec->logical >> 9;
	bio->bi_bdev = failed_bio->bi_bdev;
	bio->bi_size = 0;
	bio_add_page(bio, page, failrec->len, start - page_offset(page));
	if (failed_bio->bi_rw & (1 << BIO_RW))
		rw = WRITE;
	else
		rw = READ;

	BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
						     failrec->last_mirror,
						     failrec->bio_flags);
	return 0;
}

/*
 * each time an IO finishes, we do a fast check in the IO failure tree
 * to see if we need to process or clean up an io_failure_record
 */
int btrfs_clean_io_failures(struct inode *inode, u64 start)
{
	u64 private;
	u64 private_failure;
	struct io_failure_record *failure;
	int ret;

	private = 0;
	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
			     (u64)-1, 1, EXTENT_DIRTY)) {
		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
					start, &private_failure);
		if (ret == 0) {
			failure = (struct io_failure_record *)(unsigned long)
				   private_failure;
			set_state_private(&BTRFS_I(inode)->io_failure_tree,
					  failure->start, 0);
			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
					  failure->start,
					  failure->start + failure->len - 1,
					  EXTENT_DIRTY | EXTENT_LOCKED,
					  GFP_NOFS);
			kfree(failure);
		}
	}
	return 0;
}

/*
 * when reads are done, we need to check csums to verify the data is correct
 * if there's a match, we allow the bio to finish.  If not, we go through
 * the io_failure_record routines to find good copies
 */
int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
			       struct extent_state *state)
{
	size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
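	/* byte offset of this range inside the page; the csum covers
	 * start..end, which can be smaller than the whole page
	 */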
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	char *kaddr;
	u64 private = ~(u32)0;
	int ret;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u32 csum = ~(u32)0;
	unsigned long flags;

	if (btrfs_test_opt(root, NODATASUM) ||
	    btrfs_test_flag(inode, NODATASUM))
		return 0;
	if (state && state->start == start) {
		private = state->private;
		ret = 0;
	} else {
		ret = get_state_private(io_tree, start, &private);
	}
	local_irq_save(flags);
	kaddr = kmap_atomic(page, KM_IRQ0);
	if (ret)
		goto zeroit;

	csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
	btrfs_csum_final(csum, (char *)&csum);
	if (csum != private) {
		goto zeroit;
	}
	kunmap_atomic(kaddr, KM_IRQ0);
	local_irq_restore(flags);

	/* if the io failure tree for this inode is non-empty,
	 * check to see if we've recovered from a failed IO
	 */
	btrfs_clean_io_failures(inode, start);
	return 0;

zeroit:
	printk("btrfs csum failed ino %lu off %llu csum %u private %Lu\n",
	       page->mapping->host->i_ino, (unsigned long long)start, csum,
	       private);
	memset(kaddr + offset, 1, end - start + 1);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_IRQ0);
	local_irq_restore(flags);
	return -EIO;
}

/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink/truncate.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	spin_lock(&root->list_lock);

	/* already on the orphan list, we're good */
	if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);

	spin_unlock(&root->list_lock);

	/*
	 * insert an orphan item to track this unlinked/truncated file
	 */
	ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);

	return ret;
}

/*
 * We have done the truncate/delete so we can go ahead and remove the orphan
 * item for this particular inode.
 */
int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	spin_lock(&root->list_lock);

	if (list_empty(&BTRFS_I(inode)->i_orphan)) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	list_del_init(&BTRFS_I(inode)->i_orphan);
	if (!trans) {
		spin_unlock(&root->list_lock);
		return 0;
	}

	spin_unlock(&root->list_lock);

	ret = btrfs_del_orphan_item(trans, root, inode->i_ino);

	return ret;
}

/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
void btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	int ret = 0, nr_unlink = 0, nr_truncate = 0;

	/* don't do orphan cleanup if the fs is readonly. */
	if (root->fs_info->sb->s_flags & MS_RDONLY)
		return;

	path = btrfs_alloc_path();
	if (!path)
		return;
	path->reada = -1;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = (u64)-1;

	while(1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			printk(KERN_ERR "Error searching slot for orphan: %d"
			       "\n", ret);
			break;
		}

		/*
		 * if ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		item = btrfs_item_nr(leaf, path->slots[0]);
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(root, path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing.  we store the inode number in the
		 * offset of the orphan item.
		 */
		inode = btrfs_iget_locked(root->fs_info->sb,
					  found_key.offset, root);
		if (!inode)
			break;

		if (inode->i_state & I_NEW) {
			BTRFS_I(inode)->root = root;

			/* have to set the location manually */
			BTRFS_I(inode)->location.objectid = inode->i_ino;
			BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
			BTRFS_I(inode)->location.offset = 0;

			btrfs_read_locked_inode(inode);
			unlock_new_inode(inode);
		}

		/*
		 * add this inode to the orphan list so btrfs_orphan_del does
		 * the proper thing when we hit it
		 */
		spin_lock(&root->list_lock);
		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
		spin_unlock(&root->list_lock);

		/*
		 * if this is a bad inode, means we actually succeeded in
		 * removing the inode, but not the orphan record, which means
		 * we need to manually delete the orphan since iput will just
		 * do a destroy_inode
		 */
		if (is_bad_inode(inode)) {
			trans = btrfs_start_transaction(root, 1);
			btrfs_orphan_del(trans, inode);
			btrfs_end_transaction(trans, root);
			iput(inode);
			continue;
		}

		/* if we have links, this was a truncate, let's do that */
		if (inode->i_nlink) {
			nr_truncate++;
			btrfs_truncate(inode);
		} else {
			nr_unlink++;
		}

		/* this will do delete_inode and everything for us */
		iput(inode);
	}

	if (nr_unlink)
		printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
	if (nr_truncate)
		printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);

	btrfs_free_path(path);
}

/*
 * read an inode from the btree into the in-memory inode
 */
void btrfs_read_locked_inode(struct inode *inode)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	u64 alloc_group_block;
	u32 rdev;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret)
		goto make_bad;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
	inode->i_uid = btrfs_inode_uid(leaf, inode_item);
	inode->i_gid = btrfs_inode_gid(leaf, inode_item);
	btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;

	alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
	BTRFS_I(inode)->block_group = btrfs_lookup_block_group(root->fs_info,
						       alloc_group_block);
	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
	if (!BTRFS_I(inode)->block_group) {
		BTRFS_I(inode)->block_group = btrfs_find_block_group(root,
						 NULL, 0,
						 BTRFS_BLOCK_GROUP_METADATA, 0);
	}
	btrfs_free_path(path);
	inode_item = NULL;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		if (root == root->fs_info->tree_root)
			inode->i_op = &btrfs_dir_ro_inode_operations;
		else
			inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &btrfs_symlink_aops;
		inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
		break;
	default:
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}
	return;

make_bad:
	btrfs_free_path(path);
	make_bad_inode(inode);
}

/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	btrfs_set_inode_uid(leaf, item, inode->i_uid);
	btrfs_set_inode_gid(leaf, item, inode->i_gid);
	btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_inode_mode(leaf, item, inode->i_mode);
	btrfs_set_inode_nlink(leaf, item, inode->i_nlink);

	btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
			       inode->i_atime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
				inode->i_atime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
			       inode->i_mtime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
				inode->i_mtime.tv_nsec);

	btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
			       inode->i_ctime.tv_sec);
	btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
				inode->i_ctime.tv_nsec);

	btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
	btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
	btrfs_set_inode_transid(leaf, item, trans->transid);
	btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
	btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
	btrfs_set_inode_block_group(leaf, item,
				    BTRFS_I(inode)->block_group->key.objectid);
}

/*
 * copy everything in the in-memory inode into the btree.
 */
int noinline btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_lookup_inode(trans, root, path,
				 &BTRFS_I(inode)->location, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}

/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct inode *dir, struct inode *inode,
		       const char *name, int name_len)
{
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto err;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(root, path);

	ret = btrfs_del_inode_ref(trans, root, name, name_len,
				  inode->i_ino,
				  dir->i_ino, &index);
	if (ret) {
		printk("failed to delete reference to %.*s, "
		       "inode %lu parent %lu\n", name_len, name,
		       inode->i_ino, dir->i_ino);
		goto err;
	}

	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	btrfs_release_path(root, path);

	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
					 inode, dir->i_ino);
	BUG_ON(ret != 0 && ret != -ENOENT);

	BTRFS_I(dir)->log_dirty_trans = trans->transid;

	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
					   dir, index);
	BUG_ON(ret);
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->i_size - name_len * 2);
	inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	btrfs_update_inode(trans, root, dir);
	btrfs_drop_nlink(inode);
	ret = btrfs_update_inode(trans, root, inode);
	dir->i_sb->s_dirt = 1;
out:
	return ret;
}

static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root;
	struct btrfs_trans_handle *trans;
	struct inode *inode = dentry->d_inode;
	int ret;
	unsigned long nr = 0;

	root = BTRFS_I(dir)->root;

	ret = btrfs_check_free_space(root, 1, 1);
	if (ret)
		goto fail;

	trans = btrfs_start_transaction(root, 1);

	btrfs_set_trans_block_group(trans, dir);
	ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);

	if (inode->i_nlink == 0)
		ret = btrfs_orphan_add(trans, inode);

	nr = trans->blocks_used;

	btrfs_end_transaction_throttle(trans, root);
fail:
	btrfs_btree_balance_dirty(root, nr);
	return ret;
}

static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int err = 0;
	int ret;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	unsigned long nr = 0;

	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
		return -ENOTEMPTY;
	}

	ret = btrfs_check_free_space(root, 1, 1);
	if (ret)
		goto fail;

	trans = btrfs_start_transaction(root, 1);
	btrfs_set_trans_block_group(trans, dir);

	err = btrfs_orphan_add(trans, inode);
	if (err)
		goto fail_trans;

	/* now the directory is empty */
	err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
				 dentry->d_name.name, dentry->d_name.len);
	if (!err)
		btrfs_i_size_write(inode, 0);

fail_trans:
	nr = trans->blocks_used;
	ret = btrfs_end_transaction_throttle(trans, root);
fail:
	btrfs_btree_balance_dirty(root, nr);

	if (ret && !err)
		err = ret;
	return err;
}

/*
 * when truncating bytes in a file, it is possible to avoid reading
 * the leaves that contain only checksum items.  This can be the
 * majority of the IO required to delete a large file, but it must
 * be done carefully.
 *
 * The keys in the level just above the leaves are checked to make sure
 * the lowest key in a given leaf is a csum key, and starts at an offset
 * after the new size.
 *
 * Then the key for the next leaf is checked to make sure it also has
 * a checksum item for the same file.  If it does, we know our target leaf
 * contains only checksum items, and it can be safely freed without reading
 * it.
 *
 * This is just an optimization targeted at large files.  It may do
 * nothing.  It will return 0 unless things went badly.
 */
static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct inode *inode, u64 new_size)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key other_key;
	struct btrfs_leaf_ref *ref;
	u64 leaf_gen;
	u64 leaf_start;
	int ret;
	int nritems;
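
	/*
	 * lowest_level == 1 makes the btree search stop one level above
	 * the leaves; we inspect only the node keys, so candidate leaves
	 * can be freed without ever being read
	 */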
	path->lowest_level = 1;
	key.objectid = inode->i_ino;
	key.type = BTRFS_CSUM_ITEM_KEY;
	key.offset = new_size;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (path->nodes[1] == NULL) {
		ret = 0;
		goto out;
	}
	ret = 0;
	btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
	nritems = btrfs_header_nritems(path->nodes[1]);

	if (!nritems)
		goto out;

	if (path->slots[1] >= nritems)
		goto next_node;

	/* did we find a key greater than anything we want to delete? */
	if (found_key.objectid > inode->i_ino ||
	    (found_key.objectid == inode->i_ino && found_key.type > key.type))
		goto out;

	/* we check the next key in the node to make sure the leaf contains
	 * only checksum items.  This comparison doesn't work if our
	 * leaf is the last one in the node
	 */
	if (path->slots[1] + 1 >= nritems) {
next_node:
		/* search forward from the last key in the node, this
		 * will bring us into the next node in the tree
		 */
		btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);

		/* unlikely, but we inc below, so check to be safe */
		if (found_key.offset == (u64)-1)
			goto out;

		/* search_forward needs a path with locks held, do the
		 * search again for the original key.  It is possible
		 * this will race with a balance and return a path that
		 * we could modify, but this drop is just an optimization
		 * and is allowed to miss some leaves.
		 */
		btrfs_release_path(root, path);
		found_key.offset++;

		/* setup a max key for search_forward */
		other_key.offset = (u64)-1;
		other_key.type = key.type;
		other_key.objectid = key.objectid;

		path->keep_locks = 1;
		ret = btrfs_search_forward(root, &found_key, &other_key,
					   path, 0, 0);
		path->keep_locks = 0;
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		key.offset = found_key.offset;
		btrfs_release_path(root, path);
		cond_resched();
		goto again;
	}

	/* we know there's one more slot after us in the tree,
	 * read that key so we can verify it is also a checksum item
	 */
	btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);

	if (found_key.objectid < inode->i_ino)
		goto next_key;

	if (found_key.type != key.type || found_key.offset < new_size)
		goto next_key;

	/*
	 * if the key for the next leaf isn't a csum key from this objectid,
	 * we can't be sure there aren't good items inside this leaf.
	 * Bail out
	 */
	if (other_key.objectid != inode->i_ino || other_key.type != key.type)
		goto out;

	leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
	leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
	/*
	 * it is safe to delete this leaf, it contains only
	 * csum items from this inode at an offset >= new_size
	 */
	ret = btrfs_del_leaf(trans, root, path, leaf_start);
	BUG_ON(ret);

	if (root->ref_cows && leaf_gen < trans->transid) {
		ref = btrfs_alloc_leaf_ref(root, 0);
		if (ref) {
			ref->root_gen = root->root_key.offset;
			ref->bytenr = leaf_start;
			ref->owner = 0;
			ref->generation = leaf_gen;
			ref->nritems = 0;

			ret = btrfs_add_leaf_ref(root, ref, 0);
			WARN_ON(ret);
			btrfs_free_leaf_ref(root, ref);
		} else {
			WARN_ON(1);
		}
	}
next_key:
	btrfs_release_path(root, path);

	if (other_key.objectid == inode->i_ino &&
	    other_key.type == key.type && other_key.offset > key.offset) {
		key.offset = other_key.offset;
		cond_resched();
		goto again;
	}
	ret = 0;
out:
	/* fixup any changes we've made to the path */
	path->lowest_level = 0;
	path->keep_locks = 0;
	btrfs_release_path(root, path);
	return ret;
}

/*
 * this can truncate away extent items, csum items and directory items.
 * It starts at a high offset and removes keys until it can't find
 * any higher than new_size
 *
 * csum items that cross the new i_size are truncated to the new size
 * as well.
 *
 * min_type is the minimum key type to truncate down to.  If set to 0, this
 * will kill all the items on this inode, including the INODE_ITEM_KEY.
 */
noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					u64 new_size, u32 min_type)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 found_type;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	u64 extent_start = 0;
	u64 extent_num_bytes = 0;
	u64 item_end = 0;
	u64 root_gen = 0;
	u64 root_owner = 0;
	int found_extent;
	int del_item;
	int pending_del_nr = 0;
	int pending_del_slot = 0;
	int extent_type = -1;
	u64 mask = root->sectorsize - 1;

	if (root->ref_cows)
		btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
	path = btrfs_alloc_path();
	path->reada = -1;
	BUG_ON(!path);

	/* FIXME, add redo link to tree so we don't leak on crash */
	key.objectid = inode->i_ino;
	key.offset = (u64)-1;
	key.type = (u8)-1;

	btrfs_init_path(path);

	ret = drop_csum_leaves(trans, root, path, inode, new_size);
	BUG_ON(ret);

search_again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto error;
	if (ret > 0) {
		/* there are no items in the tree for us to truncate, we're
		 * done
		 */
		if (path->slots[0] == 0) {
			ret = 0;
			goto error;
		}
		path->slots[0]--;
	}

	while(1) {
		fi = NULL;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		found_type = btrfs_key_type(&found_key);

		if (found_key.objectid != inode->i_ino)
			break;

		if (found_type < min_type)
			break;

		item_end = found_key.offset;
		if (found_type == BTRFS_EXTENT_DATA_KEY) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			extent_type = btrfs_file_extent_type(leaf, fi);
			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
				item_end +=
				    btrfs_file_extent_num_bytes(leaf, fi);
			} else {
				item_end += btrfs_file_extent_inline_len(leaf,
									 fi);
			}
			item_end--;
		}
		if (found_type == BTRFS_CSUM_ITEM_KEY) {
			ret = btrfs_csum_truncate(trans, root, path,
						  new_size);
			BUG_ON(ret);
		}
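		/*
		 * this whole item lives below the new size; instead of
		 * deleting it, step the search key down to the next lower
		 * item type for this inode and keep walking backwards
		 */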
2158 if (item_end < new_size) {
2159 if (found_type == BTRFS_DIR_ITEM_KEY) {
2160 found_type = BTRFS_INODE_ITEM_KEY;
2161 } else if (found_type == BTRFS_EXTENT_ITEM_KEY) {
2162 found_type = BTRFS_CSUM_ITEM_KEY;
2163 } else if (found_type == BTRFS_EXTENT_DATA_KEY) {
2164 found_type = BTRFS_XATTR_ITEM_KEY;
2165 } else if (found_type == BTRFS_XATTR_ITEM_KEY) {
2166 found_type = BTRFS_INODE_REF_KEY;
2167 } else if (found_type) {
2172 btrfs_set_key_type(&key, found_type);
2175 if (found_key.offset >= new_size)
2181 /* FIXME, shrink the extent if the ref count is only 1 */
2182 if (found_type != BTRFS_EXTENT_DATA_KEY)
2185 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2187 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2189 u64 orig_num_bytes =
2190 btrfs_file_extent_num_bytes(leaf, fi);
2191 extent_num_bytes = new_size -
2192 found_key.offset + root->sectorsize - 1;
2193 extent_num_bytes = extent_num_bytes &
2194 ~((u64)root->sectorsize - 1);
2195 btrfs_set_file_extent_num_bytes(leaf, fi,
2197 num_dec = (orig_num_bytes -
2199 if (root->ref_cows && extent_start != 0)
2200 inode_sub_bytes(inode, num_dec);
2201 btrfs_mark_buffer_dirty(leaf);
2204 btrfs_file_extent_disk_num_bytes(leaf,
2206 /* FIXME blocksize != 4096 */
2207 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2208 if (extent_start != 0) {
2211 inode_sub_bytes(inode, num_dec);
2213 root_gen = btrfs_header_generation(leaf);
2214 root_owner = btrfs_header_owner(leaf);
2216 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2218 * we can't truncate inline items that have had
2222 btrfs_file_extent_compression(leaf, fi) == 0 &&
2223 btrfs_file_extent_encryption(leaf, fi) == 0 &&
2224 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2225 u32 size = new_size - found_key.offset;
2227 if (root->ref_cows) {
2228 inode_sub_bytes(inode, item_end + 1 -
2232 btrfs_file_extent_calc_inline_size(size);
2233 ret = btrfs_truncate_item(trans, root, path,
2236 } else if (root->ref_cows) {
2237 inode_sub_bytes(inode, item_end + 1 -
2243 if (!pending_del_nr) {
2244 /* no pending yet, add ourselves */
2245 pending_del_slot = path->slots[0];
2247 } else if (pending_del_nr &&
2248 path->slots[0] + 1 == pending_del_slot) {
2249 /* hop on the pending chunk */
2251 pending_del_slot = path->slots[0];
2253 printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot);
2259 ret = btrfs_free_extent(trans, root, extent_start,
2261 leaf->start, root_owner,
2262 root_gen, inode->i_ino, 0);
2266 if (path->slots[0] == 0) {
2269 btrfs_release_path(root, path);
2274 if (pending_del_nr &&
2275 path->slots[0] + 1 != pending_del_slot) {
2276 struct btrfs_key debug;
2278 btrfs_item_key_to_cpu(path->nodes[0], &debug,
2280 ret = btrfs_del_items(trans, root, path,
2285 btrfs_release_path(root, path);
2291 if (pending_del_nr) {
2292 ret = btrfs_del_items(trans, root, path, pending_del_slot,
2295 btrfs_free_path(path);
2296 inode->i_sb->s_dirt = 1;
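/*
 * Illustration (editor's sketch, not compiled): the sector rounding used
 * when truncating a regular extent above is the usual power-of-two mask
 * trick.  With a 4k sectorsize, mask = 4095:
 *
 *	(1 + 4095) & ~4095    == 4096
 *	(4096 + 4095) & ~4095 == 4096
 *	(4097 + 4095) & ~4095 == 8192
 */
#if 0
static inline u64 example_round_up_to_sector(u64 bytes, u64 sectorsize)
{
	/* assumes sectorsize is a power of two, as btrfs requires */
	return (bytes + sectorsize - 1) & ~(sectorsize - 1);
}
#endif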
2301 * taken from block_truncate_page, but does COW as it zeros out
2302 * any bytes left in the last page in the file.
2304 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
2306 struct inode *inode = mapping->host;
2307 struct btrfs_root *root = BTRFS_I(inode)->root;
2308 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2309 struct btrfs_ordered_extent *ordered;
2311 u32 blocksize = root->sectorsize;
2312 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2313 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2319 if ((offset & (blocksize - 1)) == 0)
2324 page = grab_cache_page(mapping, index);
2328 page_start = page_offset(page);
2329 page_end = page_start + PAGE_CACHE_SIZE - 1;
2331 if (!PageUptodate(page)) {
2332 ret = btrfs_readpage(NULL, page);
2334 if (page->mapping != mapping) {
2336 page_cache_release(page);
2339 if (!PageUptodate(page)) {
2344 wait_on_page_writeback(page);
2346 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2347 set_page_extent_mapped(page);
2349 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2351 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2353 page_cache_release(page);
2354 btrfs_start_ordered_extent(inode, ordered, 1);
2355 btrfs_put_ordered_extent(ordered);
2359 btrfs_set_extent_delalloc(inode, page_start, page_end);
2361 if (offset != PAGE_CACHE_SIZE) {
2363 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2364 flush_dcache_page(page);
2367 ClearPageChecked(page);
2368 set_page_dirty(page);
2369 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2373 page_cache_release(page);
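/*
 * Worked example (editor's note): the index/offset split at the top of
 * btrfs_truncate_page().  With 4k pages, from = 10000 gives
 * index = 10000 >> PAGE_CACHE_SHIFT = 2 and offset = 10000 & 4095 = 1808,
 * i.e. the third page of the file holds the EOF and its bytes
 * 1808..4095 are the ones memset to zero.
 */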
2378 int btrfs_cont_expand(struct inode *inode, loff_t size)
2380 struct btrfs_trans_handle *trans;
2381 struct btrfs_root *root = BTRFS_I(inode)->root;
2382 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2383 struct extent_map *em;
2384 u64 mask = root->sectorsize - 1;
2385 u64 hole_start = (inode->i_size + mask) & ~mask;
2386 u64 block_end = (size + mask) & ~mask;
2392 if (size <= hole_start)
2395 err = btrfs_check_free_space(root, 1, 0);
2399 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2402 struct btrfs_ordered_extent *ordered;
2403 btrfs_wait_ordered_range(inode, hole_start,
2404 block_end - hole_start);
2405 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2406 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
2409 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2410 btrfs_put_ordered_extent(ordered);
2413 trans = btrfs_start_transaction(root, 1);
2414 btrfs_set_trans_block_group(trans, inode);
2416 cur_offset = hole_start;
2418 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2419 block_end - cur_offset, 0);
2420 BUG_ON(IS_ERR(em) || !em);
2421 last_byte = min(extent_map_end(em), block_end);
2422 last_byte = (last_byte + mask) & ~mask;
2423 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2424 hole_size = last_byte - cur_offset;
2425 err = btrfs_insert_file_extent(trans, root,
2426 inode->i_ino, cur_offset, 0,
2427 0, hole_size, 0, hole_size,
2429 btrfs_drop_extent_cache(inode, hole_start,
2432 free_extent_map(em);
2433 cur_offset = last_byte;
2434 if (err || cur_offset >= block_end)
2438 btrfs_end_transaction(trans, root);
2439 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2443 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
2445 struct inode *inode = dentry->d_inode;
2448 err = inode_change_ok(inode, attr);
2452 if (S_ISREG(inode->i_mode) &&
2453 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
2454 err = btrfs_cont_expand(inode, attr->ia_size);
2459 err = inode_setattr(inode, attr);
2461 if (!err && ((attr->ia_valid & ATTR_MODE)))
2462 err = btrfs_acl_chmod(inode);
2466 void btrfs_delete_inode(struct inode *inode)
2468 struct btrfs_trans_handle *trans;
2469 struct btrfs_root *root = BTRFS_I(inode)->root;
2473 truncate_inode_pages(&inode->i_data, 0);
2474 if (is_bad_inode(inode)) {
2475 btrfs_orphan_del(NULL, inode);
2478 btrfs_wait_ordered_range(inode, 0, (u64)-1);
2480 btrfs_i_size_write(inode, 0);
2481 trans = btrfs_start_transaction(root, 1);
2483 btrfs_set_trans_block_group(trans, inode);
2484 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
2486 btrfs_orphan_del(NULL, inode);
2487 goto no_delete_lock;
2490 btrfs_orphan_del(trans, inode);
2492 nr = trans->blocks_used;
2495 btrfs_end_transaction(trans, root);
2496 btrfs_btree_balance_dirty(root, nr);
2500 nr = trans->blocks_used;
2501 btrfs_end_transaction(trans, root);
2502 btrfs_btree_balance_dirty(root, nr);
2508 * this looks up the dir entry for the dentry and returns its key in
2509 * the location pointer. If no dir entry was found, location->objectid is 0.
2511 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
2512 struct btrfs_key *location)
2514 const char *name = dentry->d_name.name;
2515 int namelen = dentry->d_name.len;
2516 struct btrfs_dir_item *di;
2517 struct btrfs_path *path;
2518 struct btrfs_root *root = BTRFS_I(dir)->root;
2521 path = btrfs_alloc_path();
2524 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
2528 if (!di || IS_ERR(di)) {
2531 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
2533 btrfs_free_path(path);
2536 location->objectid = 0;
2541 * when we hit a tree root in a directory, the btrfs part of the inode
2542 * needs to be changed to reflect the root directory of the tree root. This
2543 * is kind of like crossing a mount point.
2545 static int fixup_tree_root_location(struct btrfs_root *root,
2546 struct btrfs_key *location,
2547 struct btrfs_root **sub_root,
2548 struct dentry *dentry)
2550 struct btrfs_root_item *ri;
2552 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
2554 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
2557 *sub_root = btrfs_read_fs_root(root->fs_info, location,
2558 dentry->d_name.name,
2559 dentry->d_name.len);
2560 if (IS_ERR(*sub_root))
2561 return PTR_ERR(*sub_root);
2563 ri = &(*sub_root)->root_item;
2564 location->objectid = btrfs_root_dirid(ri);
2565 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
2566 location->offset = 0;
2571 static noinline void init_btrfs_i(struct inode *inode)
2573 struct btrfs_inode *bi = BTRFS_I(inode);
2576 bi->i_default_acl = NULL;
2580 bi->logged_trans = 0;
2581 bi->delalloc_bytes = 0;
2582 bi->disk_i_size = 0;
2584 bi->index_cnt = (u64)-1;
2585 bi->log_dirty_trans = 0;
2586 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
2587 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
2588 inode->i_mapping, GFP_NOFS);
2589 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
2590 inode->i_mapping, GFP_NOFS);
2591 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
2592 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
2593 mutex_init(&BTRFS_I(inode)->csum_mutex);
2594 mutex_init(&BTRFS_I(inode)->extent_mutex);
2595 mutex_init(&BTRFS_I(inode)->log_mutex);
2598 static int btrfs_init_locked_inode(struct inode *inode, void *p)
2600 struct btrfs_iget_args *args = p;
2601 inode->i_ino = args->ino;
2602 init_btrfs_i(inode);
2603 BTRFS_I(inode)->root = args->root;
2607 static int btrfs_find_actor(struct inode *inode, void *opaque)
2609 struct btrfs_iget_args *args = opaque;
2610 return (args->ino == inode->i_ino &&
2611 args->root == BTRFS_I(inode)->root);
2614 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
2615 struct btrfs_root *root, int wait)
2617 struct inode *inode;
2618 struct btrfs_iget_args args;
2619 args.ino = objectid;
2623 inode = ilookup5(s, objectid, btrfs_find_actor,
2626 inode = ilookup5_nowait(s, objectid, btrfs_find_actor,
2632 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
2633 struct btrfs_root *root)
2635 struct inode *inode;
2636 struct btrfs_iget_args args;
2637 args.ino = objectid;
2640 inode = iget5_locked(s, objectid, btrfs_find_actor,
2641 btrfs_init_locked_inode,
2646 /* Get an inode object given its location and corresponding root.
2647 * Sets *is_new to 1 if the inode was read from disk
2649 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
2650 struct btrfs_root *root, int *is_new)
2652 struct inode *inode;
2654 inode = btrfs_iget_locked(s, location->objectid, root);
2656 return ERR_PTR(-EACCES);
2658 if (inode->i_state & I_NEW) {
2659 BTRFS_I(inode)->root = root;
2660 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
2661 btrfs_read_locked_inode(inode);
2662 unlock_new_inode(inode);
2673 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
2674 struct nameidata *nd)
2676 struct inode *inode;
2677 struct btrfs_inode *bi = BTRFS_I(dir);
2678 struct btrfs_root *root = bi->root;
2679 struct btrfs_root *sub_root = root;
2680 struct btrfs_key location;
2681 int ret, new, do_orphan = 0;
2683 if (dentry->d_name.len > BTRFS_NAME_LEN)
2684 return ERR_PTR(-ENAMETOOLONG);
2686 ret = btrfs_inode_by_name(dir, dentry, &location);
2689 return ERR_PTR(ret);
2692 if (location.objectid) {
2693 ret = fixup_tree_root_location(root, &location, &sub_root,
2696 return ERR_PTR(ret);
2698 return ERR_PTR(-ENOENT);
2699 inode = btrfs_iget(dir->i_sb, &location, sub_root, &new);
2701 return ERR_CAST(inode);
2703 /* the inode and parent dir are in two different roots */
2704 if (new && root != sub_root) {
2706 sub_root->inode = inode;
2711 if (unlikely(do_orphan))
2712 btrfs_orphan_cleanup(sub_root);
2714 return d_splice_alias(inode, dentry);
2717 static unsigned char btrfs_filetype_table[] = {
2718 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
2721 static int btrfs_real_readdir(struct file *filp, void *dirent,
2724 struct inode *inode = filp->f_dentry->d_inode;
2725 struct btrfs_root *root = BTRFS_I(inode)->root;
2726 struct btrfs_item *item;
2727 struct btrfs_dir_item *di;
2728 struct btrfs_key key;
2729 struct btrfs_key found_key;
2730 struct btrfs_path *path;
2733 struct extent_buffer *leaf;
2736 unsigned char d_type;
2741 int key_type = BTRFS_DIR_INDEX_KEY;
2746 /* FIXME, use a real flag for deciding about the key type */
2747 if (root->fs_info->tree_root == root)
2748 key_type = BTRFS_DIR_ITEM_KEY;
2750 /* special case for "." */
2751 if (filp->f_pos == 0) {
2752 over = filldir(dirent, ".", 1,
2759 /* special case for .., just use the back ref */
2760 if (filp->f_pos == 1) {
2761 u64 pino = parent_ino(filp->f_path.dentry);
2762 over = filldir(dirent, "..", 2,
2769 path = btrfs_alloc_path();
2772 btrfs_set_key_type(&key, key_type);
2773 key.offset = filp->f_pos;
2774 key.objectid = inode->i_ino;
2776 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2782 leaf = path->nodes[0];
2783 nritems = btrfs_header_nritems(leaf);
2784 slot = path->slots[0];
2785 if (advance || slot >= nritems) {
2786 if (slot >= nritems - 1) {
2787 ret = btrfs_next_leaf(root, path);
2790 leaf = path->nodes[0];
2791 nritems = btrfs_header_nritems(leaf);
2792 slot = path->slots[0];
2799 item = btrfs_item_nr(leaf, slot);
2800 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2802 if (found_key.objectid != key.objectid)
2804 if (btrfs_key_type(&found_key) != key_type)
2806 if (found_key.offset < filp->f_pos)
2809 filp->f_pos = found_key.offset;
2811 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
2813 di_total = btrfs_item_size(leaf, item);
2815 while (di_cur < di_total) {
2816 struct btrfs_key location;
2818 name_len = btrfs_dir_name_len(leaf, di);
2819 if (name_len <= sizeof(tmp_name)) {
2820 name_ptr = tmp_name;
2822 name_ptr = kmalloc(name_len, GFP_NOFS);
2828 read_extent_buffer(leaf, name_ptr,
2829 (unsigned long)(di + 1), name_len);
2831 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
2832 btrfs_dir_item_key_to_cpu(leaf, di, &location);
2833 over = filldir(dirent, name_ptr, name_len,
2834 found_key.offset, location.objectid,
2837 if (name_ptr != tmp_name)
2843 di_len = btrfs_dir_name_len(leaf, di) +
2844 btrfs_dir_data_len(leaf, di) + sizeof(*di);
2846 di = (struct btrfs_dir_item *)((char *)di + di_len);
2850 /* Reached end of directory/root. Bump pos past the last item. */
2851 if (key_type == BTRFS_DIR_INDEX_KEY)
2852 filp->f_pos = INT_LIMIT(typeof(filp->f_pos));
2858 btrfs_free_path(path);
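/*
 * Editor's note on the f_pos convention above: positions 0 and 1 are
 * synthesized for "." and ".."; every other position is a
 * BTRFS_DIR_INDEX_KEY offset.  A telldir(3) cookie is therefore just the
 * directory index of the next entry, and resuming readdir re-runs the
 * btree search from that key.
 */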
2862 int btrfs_write_inode(struct inode *inode, int wait)
2864 struct btrfs_root *root = BTRFS_I(inode)->root;
2865 struct btrfs_trans_handle *trans;
2868 if (root->fs_info->closing > 1)
2872 trans = btrfs_join_transaction(root, 1);
2873 btrfs_set_trans_block_group(trans, inode);
2874 ret = btrfs_commit_transaction(trans, root);
2880 * This is somewhat expensive, updating the tree every time the
2881 * inode changes. But, it is most likely to find the inode in cache.
2882 * FIXME, needs more benchmarking...there is no reason other than
2883 * performance to keep or drop this code.
2885 void btrfs_dirty_inode(struct inode *inode)
2887 struct btrfs_root *root = BTRFS_I(inode)->root;
2888 struct btrfs_trans_handle *trans;
2890 trans = btrfs_join_transaction(root, 1);
2891 btrfs_set_trans_block_group(trans, inode);
2892 btrfs_update_inode(trans, root, inode);
2893 btrfs_end_transaction(trans, root);
2897 * find the highest existing sequence number in a directory
2898 * and then set the in-memory index_cnt variable to point at the
2899 * first free sequence number
2901 static int btrfs_set_inode_index_count(struct inode *inode)
2903 struct btrfs_root *root = BTRFS_I(inode)->root;
2904 struct btrfs_key key, found_key;
2905 struct btrfs_path *path;
2906 struct extent_buffer *leaf;
2909 key.objectid = inode->i_ino;
2910 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
2911 key.offset = (u64)-1;
2913 path = btrfs_alloc_path();
2917 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2920 /* FIXME: we should be able to handle this */
2926 * MAGIC NUMBER EXPLANATION:
2927 * we search a directory based on f_pos, and '.' and '..' occupy
2928 * f_pos 0 and 1 respectively, so every real entry (and the first
2929 * free index) has to start at 2
2931 if (path->slots[0] == 0) {
2932 BTRFS_I(inode)->index_cnt = 2;
2938 leaf = path->nodes[0];
2939 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2941 if (found_key.objectid != inode->i_ino ||
2942 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
2943 BTRFS_I(inode)->index_cnt = 2;
2947 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
2949 btrfs_free_path(path);
2954 * helper to find a free sequence number in a given directory. The
2955 * current code is very simple; later versions will do smarter things in the btree
2957 static int btrfs_set_inode_index(struct inode *dir, struct inode *inode,
2962 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
2963 ret = btrfs_set_inode_index_count(dir);
2969 *index = BTRFS_I(dir)->index_cnt;
2970 BTRFS_I(dir)->index_cnt++;
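/*
 * Illustration (editor's sketch, not compiled; inode_a/b/c are
 * hypothetical inodes and return-value checks are omitted): for a freshly
 * created directory, index_cnt starts at 2 (see the magic number
 * explanation above), so consecutive creations receive monotonically
 * increasing indexes:
 */
#if 0
u64 idx;
btrfs_set_inode_index(dir, inode_a, &idx);	/* idx == 2 */
btrfs_set_inode_index(dir, inode_b, &idx);	/* idx == 3 */
btrfs_set_inode_index(dir, inode_c, &idx);	/* idx == 4 */
#endif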
2975 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
2976 struct btrfs_root *root,
2978 const char *name, int name_len,
2981 struct btrfs_block_group_cache *group,
2982 int mode, u64 *index)
2984 struct inode *inode;
2985 struct btrfs_inode_item *inode_item;
2986 struct btrfs_block_group_cache *new_inode_group;
2987 struct btrfs_key *location;
2988 struct btrfs_path *path;
2989 struct btrfs_inode_ref *ref;
2990 struct btrfs_key key[2];
2996 path = btrfs_alloc_path();
2999 inode = new_inode(root->fs_info->sb);
3001 return ERR_PTR(-ENOMEM);
3004 ret = btrfs_set_inode_index(dir, inode, index);
3006 return ERR_PTR(ret);
3009 * index_cnt is ignored for everything but a dir,
3010 * btrfs_set_inode_index_count has an explanation for the magic
3013 init_btrfs_i(inode);
3014 BTRFS_I(inode)->index_cnt = 2;
3015 BTRFS_I(inode)->root = root;
3016 BTRFS_I(inode)->generation = trans->transid;
3022 new_inode_group = btrfs_find_block_group(root, group, 0,
3023 BTRFS_BLOCK_GROUP_METADATA, owner);
3024 if (!new_inode_group) {
3025 printk(KERN_ERR "btrfs_find_block_group failed\n");
3026 new_inode_group = group;
3028 BTRFS_I(inode)->block_group = new_inode_group;
3030 key[0].objectid = objectid;
3031 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
3034 key[1].objectid = objectid;
3035 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
3036 key[1].offset = ref_objectid;
3038 sizes[0] = sizeof(struct btrfs_inode_item);
3039 sizes[1] = name_len + sizeof(*ref);
3041 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
3045 if (objectid > root->highest_inode)
3046 root->highest_inode = objectid;
3048 inode->i_uid = current->fsuid;
3049 inode->i_gid = current->fsgid;
3050 inode->i_mode = mode;
3051 inode->i_ino = objectid;
3052 inode_set_bytes(inode, 0);
3053 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3054 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3055 struct btrfs_inode_item);
3056 fill_inode_item(trans, path->nodes[0], inode_item, inode);
3058 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3059 struct btrfs_inode_ref);
3060 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
3061 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
3062 ptr = (unsigned long)(ref + 1);
3063 write_extent_buffer(path->nodes[0], name, ptr, name_len);
3065 btrfs_mark_buffer_dirty(path->nodes[0]);
3066 btrfs_free_path(path);
3068 location = &BTRFS_I(inode)->location;
3069 location->objectid = objectid;
3070 location->offset = 0;
3071 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3073 insert_inode_hash(inode);
3077 BTRFS_I(dir)->index_cnt--;
3078 btrfs_free_path(path);
3079 return ERR_PTR(ret);
3082 static inline u8 btrfs_inode_type(struct inode *inode)
3084 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
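/*
 * Illustration (editor's sketch, not compiled): btrfs_inode_type() is a
 * pure table lookup on the S_IFMT bits, mapping VFS mode bits to the
 * on-disk dir entry type:
 */
#if 0
/* for any directory inode, the lookup yields BTRFS_FT_DIR */
u8 type = btrfs_inode_type(inode);
/* equivalent open-coded form: */
type = btrfs_type_by_mode[(S_IFDIR & S_IFMT) >> S_SHIFT];
#endif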
3088 * utility function to add 'inode' into 'parent_inode' with
3089 * a given name and a given sequence number.
3090 * if 'add_backref' is true, also insert a backref from the
3091 * inode to the parent directory.
3093 int btrfs_add_link(struct btrfs_trans_handle *trans,
3094 struct inode *parent_inode, struct inode *inode,
3095 const char *name, int name_len, int add_backref, u64 index)
3098 struct btrfs_key key;
3099 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
3101 key.objectid = inode->i_ino;
3102 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
3105 ret = btrfs_insert_dir_item(trans, root, name, name_len,
3106 parent_inode->i_ino,
3107 &key, btrfs_inode_type(inode),
3111 ret = btrfs_insert_inode_ref(trans, root,
3114 parent_inode->i_ino,
3117 btrfs_i_size_write(parent_inode, parent_inode->i_size +
3119 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
3120 ret = btrfs_update_inode(trans, root, parent_inode);
3125 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
3126 struct dentry *dentry, struct inode *inode,
3127 int backref, u64 index)
3129 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3130 inode, dentry->d_name.name,
3131 dentry->d_name.len, backref, index);
3133 d_instantiate(dentry, inode);
3141 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3142 int mode, dev_t rdev)
3144 struct btrfs_trans_handle *trans;
3145 struct btrfs_root *root = BTRFS_I(dir)->root;
3146 struct inode *inode = NULL;
3150 unsigned long nr = 0;
3153 if (!new_valid_dev(rdev))
3156 err = btrfs_check_free_space(root, 1, 0);
3160 trans = btrfs_start_transaction(root, 1);
3161 btrfs_set_trans_block_group(trans, dir);
3163 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3169 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3171 dentry->d_parent->d_inode->i_ino, objectid,
3172 BTRFS_I(dir)->block_group, mode, &index);
3173 err = PTR_ERR(inode);
3177 err = btrfs_init_acl(inode, dir);
3183 btrfs_set_trans_block_group(trans, inode);
3184 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3188 inode->i_op = &btrfs_special_inode_operations;
3189 init_special_inode(inode, inode->i_mode, rdev);
3190 btrfs_update_inode(trans, root, inode);
3192 dir->i_sb->s_dirt = 1;
3193 btrfs_update_inode_block_group(trans, inode);
3194 btrfs_update_inode_block_group(trans, dir);
3196 nr = trans->blocks_used;
3197 btrfs_end_transaction_throttle(trans, root);
3200 inode_dec_link_count(inode);
3203 btrfs_btree_balance_dirty(root, nr);
3207 static int btrfs_create(struct inode *dir, struct dentry *dentry,
3208 int mode, struct nameidata *nd)
3210 struct btrfs_trans_handle *trans;
3211 struct btrfs_root *root = BTRFS_I(dir)->root;
3212 struct inode *inode = NULL;
3215 unsigned long nr = 0;
3219 err = btrfs_check_free_space(root, 1, 0);
3222 trans = btrfs_start_transaction(root, 1);
3223 btrfs_set_trans_block_group(trans, dir);
3225 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3231 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3233 dentry->d_parent->d_inode->i_ino,
3234 objectid, BTRFS_I(dir)->block_group, mode,
3236 err = PTR_ERR(inode);
3240 err = btrfs_init_acl(inode, dir);
3246 btrfs_set_trans_block_group(trans, inode);
3247 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3251 inode->i_mapping->a_ops = &btrfs_aops;
3252 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3253 inode->i_fop = &btrfs_file_operations;
3254 inode->i_op = &btrfs_file_inode_operations;
3255 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3257 dir->i_sb->s_dirt = 1;
3258 btrfs_update_inode_block_group(trans, inode);
3259 btrfs_update_inode_block_group(trans, dir);
3261 nr = trans->blocks_used;
3262 btrfs_end_transaction_throttle(trans, root);
3265 inode_dec_link_count(inode);
3268 btrfs_btree_balance_dirty(root, nr);
3272 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3273 struct dentry *dentry)
3275 struct btrfs_trans_handle *trans;
3276 struct btrfs_root *root = BTRFS_I(dir)->root;
3277 struct inode *inode = old_dentry->d_inode;
3279 unsigned long nr = 0;
3283 if (inode->i_nlink == 0)
3286 btrfs_inc_nlink(inode);
3287 err = btrfs_check_free_space(root, 1, 0);
3290 err = btrfs_set_inode_index(dir, inode, &index);
3294 trans = btrfs_start_transaction(root, 1);
3296 btrfs_set_trans_block_group(trans, dir);
3297 atomic_inc(&inode->i_count);
3299 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
3304 dir->i_sb->s_dirt = 1;
3305 btrfs_update_inode_block_group(trans, dir);
3306 err = btrfs_update_inode(trans, root, inode);
3311 nr = trans->blocks_used;
3312 btrfs_end_transaction_throttle(trans, root);
3315 inode_dec_link_count(inode);
3318 btrfs_btree_balance_dirty(root, nr);
3322 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3324 struct inode *inode = NULL;
3325 struct btrfs_trans_handle *trans;
3326 struct btrfs_root *root = BTRFS_I(dir)->root;
3328 int drop_on_err = 0;
3331 unsigned long nr = 1;
3333 err = btrfs_check_free_space(root, 1, 0);
3337 trans = btrfs_start_transaction(root, 1);
3338 btrfs_set_trans_block_group(trans, dir);
3340 if (IS_ERR(trans)) {
3341 err = PTR_ERR(trans);
3345 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3351 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3353 dentry->d_parent->d_inode->i_ino, objectid,
3354 BTRFS_I(dir)->block_group, S_IFDIR | mode,
3356 if (IS_ERR(inode)) {
3357 err = PTR_ERR(inode);
3363 err = btrfs_init_acl(inode, dir);
3367 inode->i_op = &btrfs_dir_inode_operations;
3368 inode->i_fop = &btrfs_dir_file_operations;
3369 btrfs_set_trans_block_group(trans, inode);
3371 btrfs_i_size_write(inode, 0);
3372 err = btrfs_update_inode(trans, root, inode);
3376 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3377 inode, dentry->d_name.name,
3378 dentry->d_name.len, 0, index);
3382 d_instantiate(dentry, inode);
3384 dir->i_sb->s_dirt = 1;
3385 btrfs_update_inode_block_group(trans, inode);
3386 btrfs_update_inode_block_group(trans, dir);
3389 nr = trans->blocks_used;
3390 btrfs_end_transaction_throttle(trans, root);
3395 btrfs_btree_balance_dirty(root, nr);
3399 /* helper for btrfs_get_extent. Given an existing extent in the tree,
3400 * and an extent that you want to insert, deal with overlap and insert
3401 * the new extent into the tree.
3403 static int merge_extent_mapping(struct extent_map_tree *em_tree,
3404 struct extent_map *existing,
3405 struct extent_map *em,
3406 u64 map_start, u64 map_len)
3410 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
3411 start_diff = map_start - em->start;
3412 em->start = map_start;
3414 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
3415 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
3416 em->block_start += start_diff;
3417 em->block_len -= start_diff;
3419 return add_extent_mapping(em_tree, em);
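/*
 * Worked example (editor's note): suppose btrfs_get_extent() built
 * em = [4k, 16k) but add_extent_mapping() hit -EEXIST, and the caller
 * retries with map_start = 8k, the offset it actually asked for.
 * start_diff = 8k - 4k = 4k, so em is trimmed to [8k, 16k); for a real,
 * uncompressed extent em->block_start advances by the same 4k before the
 * insert is retried.
 */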
3422 static noinline int uncompress_inline(struct btrfs_path *path,
3423 struct inode *inode, struct page *page,
3424 size_t pg_offset, u64 extent_offset,
3425 struct btrfs_file_extent_item *item)
3428 struct extent_buffer *leaf = path->nodes[0];
3431 unsigned long inline_size;
3434 WARN_ON(pg_offset != 0);
3435 max_size = btrfs_file_extent_ram_bytes(leaf, item);
3436 inline_size = btrfs_file_extent_inline_item_len(leaf,
3437 btrfs_item_nr(leaf, path->slots[0]));
3438 tmp = kmalloc(inline_size, GFP_NOFS);
3439 ptr = btrfs_file_extent_inline_start(item);
3441 read_extent_buffer(leaf, tmp, ptr, inline_size);
3443 max_size = min(PAGE_CACHE_SIZE, max_size);
3444 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
3445 inline_size, max_size);
3447 char *kaddr = kmap_atomic(page, KM_USER0);
3448 unsigned long copy_size = min_t(u64,
3449 PAGE_CACHE_SIZE - pg_offset,
3450 max_size - extent_offset);
3451 memset(kaddr + pg_offset, 0, copy_size);
3452 kunmap_atomic(kaddr, KM_USER0);
3459 * a bit scary, this does extent mapping from logical file offset to the disk.
3460 * the ugly parts come from merging extents from the disk with the
3461 * in-ram representation. This gets more complex because of the data=ordered code,
3462 * where the in-ram extents might be locked pending data=ordered completion.
3464 * This also copies inline extents directly into the page.
3466 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
3467 size_t pg_offset, u64 start, u64 len,
3473 u64 extent_start = 0;
3475 u64 objectid = inode->i_ino;
3477 struct btrfs_path *path = NULL;
3478 struct btrfs_root *root = BTRFS_I(inode)->root;
3479 struct btrfs_file_extent_item *item;
3480 struct extent_buffer *leaf;
3481 struct btrfs_key found_key;
3482 struct extent_map *em = NULL;
3483 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3484 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3485 struct btrfs_trans_handle *trans = NULL;
3489 spin_lock(&em_tree->lock);
3490 em = lookup_extent_mapping(em_tree, start, len);
3492 em->bdev = root->fs_info->fs_devices->latest_bdev;
3493 spin_unlock(&em_tree->lock);
3496 if (em->start > start || em->start + em->len <= start)
3497 free_extent_map(em);
3498 else if (em->block_start == EXTENT_MAP_INLINE && page)
3499 free_extent_map(em);
3503 em = alloc_extent_map(GFP_NOFS);
3508 em->bdev = root->fs_info->fs_devices->latest_bdev;
3509 em->start = EXTENT_MAP_HOLE;
3511 em->block_len = (u64)-1;
3514 path = btrfs_alloc_path();
3518 ret = btrfs_lookup_file_extent(trans, root, path,
3519 objectid, start, trans != NULL);
3526 if (path->slots[0] == 0)
3531 leaf = path->nodes[0];
3532 item = btrfs_item_ptr(leaf, path->slots[0],
3533 struct btrfs_file_extent_item);
3534 /* are we inside the extent that was found? */
3535 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3536 found_type = btrfs_key_type(&found_key);
3537 if (found_key.objectid != objectid ||
3538 found_type != BTRFS_EXTENT_DATA_KEY) {
3542 found_type = btrfs_file_extent_type(leaf, item);
3543 extent_start = found_key.offset;
3544 compressed = btrfs_file_extent_compression(leaf, item);
3545 if (found_type == BTRFS_FILE_EXTENT_REG ||
3546 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
3547 extent_end = extent_start +
3548 btrfs_file_extent_num_bytes(leaf, item);
3549 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
3551 size = btrfs_file_extent_inline_len(leaf, item);
3552 extent_end = (extent_start + size + root->sectorsize - 1) &
3553 ~((u64)root->sectorsize - 1);
3556 if (start >= extent_end) {
3558 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3559 ret = btrfs_next_leaf(root, path);
3566 leaf = path->nodes[0];
3568 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3569 if (found_key.objectid != objectid ||
3570 found_key.type != BTRFS_EXTENT_DATA_KEY)
3572 if (start + len <= found_key.offset)
3575 em->len = found_key.offset - start;
3579 if (found_type == BTRFS_FILE_EXTENT_REG ||
3580 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
3581 em->start = extent_start;
3582 em->len = extent_end - extent_start;
3583 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
3585 em->block_start = EXTENT_MAP_HOLE;
3589 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3590 em->block_start = bytenr;
3591 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
3594 bytenr += btrfs_file_extent_offset(leaf, item);
3595 em->block_start = bytenr;
3596 em->block_len = em->len;
3597 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
3598 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
3601 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
3605 size_t extent_offset;
3608 em->block_start = EXTENT_MAP_INLINE;
3609 if (!page || create) {
3610 em->start = extent_start;
3611 em->len = extent_end - extent_start;
3615 size = btrfs_file_extent_inline_len(leaf, item);
3616 extent_offset = page_offset(page) + pg_offset - extent_start;
3617 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
3618 size - extent_offset);
3619 em->start = extent_start + extent_offset;
3620 em->len = (copy_size + root->sectorsize - 1) &
3621 ~((u64)root->sectorsize - 1);
3623 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
3624 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
3625 if (create == 0 && !PageUptodate(page)) {
3626 if (btrfs_file_extent_compression(leaf, item) ==
3627 BTRFS_COMPRESS_ZLIB) {
3628 ret = uncompress_inline(path, inode, page,
3630 extent_offset, item);
3634 read_extent_buffer(leaf, map + pg_offset, ptr,
3638 flush_dcache_page(page);
3639 } else if (create && PageUptodate(page)) {
3642 free_extent_map(em);
3644 btrfs_release_path(root, path);
3645 trans = btrfs_join_transaction(root, 1);
3649 write_extent_buffer(leaf, map + pg_offset, ptr,
3652 btrfs_mark_buffer_dirty(leaf);
3654 set_extent_uptodate(io_tree, em->start,
3655 extent_map_end(em) - 1, GFP_NOFS);
3658 printk(KERN_ERR "unknown found_type %d\n", found_type);
3665 em->block_start = EXTENT_MAP_HOLE;
3666 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
3668 btrfs_release_path(root, path);
3669 if (em->start > start || extent_map_end(em) <= start) {
3670 printk(KERN_ERR "bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->len, start, len);
3676 spin_lock(&em_tree->lock);
3677 ret = add_extent_mapping(em_tree, em);
3678 /* it is possible that someone inserted the extent into the tree
3679 * while we had the lock dropped. It is also possible that
3680 * an overlapping map exists in the tree
3682 if (ret == -EEXIST) {
3683 struct extent_map *existing;
3687 existing = lookup_extent_mapping(em_tree, start, len);
3688 if (existing && (existing->start > start ||
3689 existing->start + existing->len <= start)) {
3690 free_extent_map(existing);
3694 existing = lookup_extent_mapping(em_tree, em->start,
3697 err = merge_extent_mapping(em_tree, existing,
3700 free_extent_map(existing);
3702 free_extent_map(em);
3707 printk(KERN_ERR "failed to insert extent %Lu %Lu\n",
3709 free_extent_map(em);
3713 free_extent_map(em);
3718 spin_unlock(&em_tree->lock);
3721 btrfs_free_path(path);
3723 ret = btrfs_end_transaction(trans, root);
3729 free_extent_map(em);
3731 return ERR_PTR(err);
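/*
 * Illustration (editor's sketch, not compiled; start, end and inode stand
 * in for the caller's locals) of the calling convention, mirroring
 * btrfs_cont_expand() above and btrfs_fallocate() below: walk a file
 * range one extent map at a time.  Holes come back as mappings with
 * block_start == EXTENT_MAP_HOLE rather than as errors or NULL.
 */
#if 0
u64 cur = start;
while (cur < end) {
	struct extent_map *em;

	em = btrfs_get_extent(inode, NULL, 0, cur, end - cur, 0);
	if (IS_ERR(em))
		break;
	/* inspect em here; EXTENT_MAP_HOLE means nothing is allocated */
	cur = extent_map_end(em);
	free_extent_map(em);
}
#endif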
3736 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
3737 const struct iovec *iov, loff_t offset,
3738 unsigned long nr_segs)
3743 static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
3745 return extent_bmap(mapping, iblock, btrfs_get_extent);
3748 int btrfs_readpage(struct file *file, struct page *page)
3750 struct extent_io_tree *tree;
3751 tree = &BTRFS_I(page->mapping->host)->io_tree;
3752 return extent_read_full_page(tree, page, btrfs_get_extent);
3755 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
3757 struct extent_io_tree *tree;
3760 if (current->flags & PF_MEMALLOC) {
3761 redirty_page_for_writepage(wbc, page);
3765 tree = &BTRFS_I(page->mapping->host)->io_tree;
3766 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
3769 int btrfs_writepages(struct address_space *mapping,
3770 struct writeback_control *wbc)
3772 struct extent_io_tree *tree;
3773 tree = &BTRFS_I(mapping->host)->io_tree;
3774 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
3778 btrfs_readpages(struct file *file, struct address_space *mapping,
3779 struct list_head *pages, unsigned nr_pages)
3781 struct extent_io_tree *tree;
3782 tree = &BTRFS_I(mapping->host)->io_tree;
3783 return extent_readpages(tree, mapping, pages, nr_pages,
3786 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
3788 struct extent_io_tree *tree;
3789 struct extent_map_tree *map;
3792 tree = &BTRFS_I(page->mapping->host)->io_tree;
3793 map = &BTRFS_I(page->mapping->host)->extent_tree;
3794 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
3796 ClearPagePrivate(page);
3797 set_page_private(page, 0);
3798 page_cache_release(page);
3803 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
3805 if (PageWriteback(page) || PageDirty(page))
3807 return __btrfs_releasepage(page, gfp_flags);
3810 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
3812 struct extent_io_tree *tree;
3813 struct btrfs_ordered_extent *ordered;
3814 u64 page_start = page_offset(page);
3815 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
3817 wait_on_page_writeback(page);
3818 tree = &BTRFS_I(page->mapping->host)->io_tree;
3820 btrfs_releasepage(page, GFP_NOFS);
3824 lock_extent(tree, page_start, page_end, GFP_NOFS);
3825 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
3829 * IO on this page will never be started, so we need
3830 * to account for any ordered extents now
3832 clear_extent_bit(tree, page_start, page_end,
3833 EXTENT_DIRTY | EXTENT_DELALLOC |
3834 EXTENT_LOCKED, 1, 0, GFP_NOFS);
3835 btrfs_finish_ordered_io(page->mapping->host,
3836 page_start, page_end);
3837 btrfs_put_ordered_extent(ordered);
3838 lock_extent(tree, page_start, page_end, GFP_NOFS);
3840 clear_extent_bit(tree, page_start, page_end,
3841 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
3844 __btrfs_releasepage(page, GFP_NOFS);
3846 ClearPageChecked(page);
3847 if (PagePrivate(page)) {
3848 ClearPagePrivate(page);
3849 set_page_private(page, 0);
3850 page_cache_release(page);
3855 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
3856 * called from a page fault handler when a page is first dirtied. Hence we must
3857 * be careful to check for EOF conditions here. We set the page up correctly
3858 * for a written page which means we get ENOSPC checking when writing into
3859 * holes and correct delalloc and unwritten extent mapping on filesystems that
3860 * support these features.
3862 * We are not allowed to take the i_mutex here so we have to play games to
3863 * protect against truncate races as the page could now be beyond EOF. Because
3864 * vmtruncate() writes the inode size before removing pages, once we have the
3865 * page lock we can determine safely if the page is beyond EOF. If it is not
3866 * beyond EOF, then the page is guaranteed safe against truncation until we
3869 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
3871 struct inode *inode = fdentry(vma->vm_file)->d_inode;
3872 struct btrfs_root *root = BTRFS_I(inode)->root;
3873 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3874 struct btrfs_ordered_extent *ordered;
3876 unsigned long zero_start;
3882 ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
3889 size = i_size_read(inode);
3890 page_start = page_offset(page);
3891 page_end = page_start + PAGE_CACHE_SIZE - 1;
3893 if ((page->mapping != inode->i_mapping) ||
3894 (page_start >= size)) {
3895 /* page got truncated out from underneath us */
3898 wait_on_page_writeback(page);
3900 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3901 set_page_extent_mapped(page);
3904 * we can't set the delalloc bits if there are pending ordered
3905 * extents. Drop our locks and wait for them to finish
3907 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3909 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3911 btrfs_start_ordered_extent(inode, ordered, 1);
3912 btrfs_put_ordered_extent(ordered);
3916 btrfs_set_extent_delalloc(inode, page_start, page_end);
3919 /* page is wholly or partially inside EOF */
3920 if (page_start + PAGE_CACHE_SIZE > size)
3921 zero_start = size & ~PAGE_CACHE_MASK;
3923 zero_start = PAGE_CACHE_SIZE;
3925 if (zero_start != PAGE_CACHE_SIZE) {
3927 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
3928 flush_dcache_page(page);
3931 ClearPageChecked(page);
3932 set_page_dirty(page);
3933 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
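/*
 * Worked example (editor's note): the zero_start math above.  With 4k
 * pages and i_size = 10000, the page at index 2 spans file bytes
 * 8192..12287; zero_start = 10000 & ~PAGE_CACHE_MASK = 1808, so bytes
 * 1808..4095 of the page are zeroed.  A page wholly inside EOF keeps
 * zero_start == PAGE_CACHE_SIZE and skips the memset entirely.
 */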
3941 static void btrfs_truncate(struct inode *inode)
3943 struct btrfs_root *root = BTRFS_I(inode)->root;
3945 struct btrfs_trans_handle *trans;
3947 u64 mask = root->sectorsize - 1;
3949 if (!S_ISREG(inode->i_mode))
3951 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
3954 btrfs_truncate_page(inode->i_mapping, inode->i_size);
3955 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
3957 trans = btrfs_start_transaction(root, 1);
3958 btrfs_set_trans_block_group(trans, inode);
3959 btrfs_i_size_write(inode, inode->i_size);
3961 ret = btrfs_orphan_add(trans, inode);
3964 /* FIXME, add redo link to tree so we don't leak on crash */
3965 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
3966 BTRFS_EXTENT_DATA_KEY);
3967 btrfs_update_inode(trans, root, inode);
3969 ret = btrfs_orphan_del(trans, inode);
3973 nr = trans->blocks_used;
3974 ret = btrfs_end_transaction_throttle(trans, root);
3976 btrfs_btree_balance_dirty(root, nr);
3980 * Invalidate a single dcache entry at the root of the filesystem.
3981 * Needed after creation of snapshot or subvolume.
3983 void btrfs_invalidate_dcache_root(struct btrfs_root *root, char *name,
3986 struct dentry *alias, *entry;
3989 alias = d_find_alias(root->fs_info->sb->s_root->d_inode);
3993 /* change me if btrfs ever gets a d_hash operation */
3994 qstr.hash = full_name_hash(qstr.name, qstr.len);
3995 entry = d_lookup(alias, &qstr);
3998 d_invalidate(entry);
4005 * create a new subvolume directory/inode (helper for the ioctl).
4007 int btrfs_create_subvol_root(struct btrfs_root *new_root, struct dentry *dentry,
4008 struct btrfs_trans_handle *trans, u64 new_dirid,
4009 struct btrfs_block_group_cache *block_group)
4011 struct inode *inode;
4015 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
4016 new_dirid, block_group, S_IFDIR | 0700, &index);
4018 return PTR_ERR(inode);
4019 inode->i_op = &btrfs_dir_inode_operations;
4020 inode->i_fop = &btrfs_dir_file_operations;
4021 new_root->inode = inode;
4024 btrfs_i_size_write(inode, 0);
4026 error = btrfs_update_inode(trans, new_root, inode);
4030 atomic_inc(&inode->i_count);
4031 d_instantiate(dentry, inode);
4035 /* helper function for file defrag and space balancing. This
4036 * forces readahead on a given range of bytes in an inode
4038 unsigned long btrfs_force_ra(struct address_space *mapping,
4039 struct file_ra_state *ra, struct file *file,
4040 pgoff_t offset, pgoff_t last_index)
4042 pgoff_t req_size = last_index - offset + 1;
4044 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
4045 return offset + req_size;
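/*
 * Illustration (editor's sketch, not compiled): typical use by the defrag
 * path, which marches through a file window by window ('ra_index' and
 * 'last_index' stand in for the caller's locals):
 */
#if 0
ra_index = btrfs_force_ra(inode->i_mapping, &file->f_ra, file,
			  ra_index, last_index);
#endif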
4048 struct inode *btrfs_alloc_inode(struct super_block *sb)
4050 struct btrfs_inode *ei;
4052 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
4056 ei->logged_trans = 0;
4057 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
4058 ei->i_acl = BTRFS_ACL_NOT_CACHED;
4059 ei->i_default_acl = BTRFS_ACL_NOT_CACHED;
4060 INIT_LIST_HEAD(&ei->i_orphan);
4061 return &ei->vfs_inode;
4064 void btrfs_destroy_inode(struct inode *inode)
4066 struct btrfs_ordered_extent *ordered;
4067 WARN_ON(!list_empty(&inode->i_dentry));
4068 WARN_ON(inode->i_data.nrpages);
4070 if (BTRFS_I(inode)->i_acl &&
4071 BTRFS_I(inode)->i_acl != BTRFS_ACL_NOT_CACHED)
4072 posix_acl_release(BTRFS_I(inode)->i_acl);
4073 if (BTRFS_I(inode)->i_default_acl &&
4074 BTRFS_I(inode)->i_default_acl != BTRFS_ACL_NOT_CACHED)
4075 posix_acl_release(BTRFS_I(inode)->i_default_acl);
4077 spin_lock(&BTRFS_I(inode)->root->list_lock);
4078 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
4079 printk(KERN_ERR "BTRFS: inode %lu still on the orphan"
4080 " list\n", inode->i_ino);
4083 spin_unlock(&BTRFS_I(inode)->root->list_lock);
4086 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
4090 printk(KERN_ERR "found ordered extent %Lu %Lu\n",
4091 ordered->file_offset, ordered->len);
4092 btrfs_remove_ordered_extent(inode, ordered);
4093 btrfs_put_ordered_extent(ordered);
4094 btrfs_put_ordered_extent(ordered);
4097 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
4098 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
4101 static void init_once(void *foo)
4103 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
4105 inode_init_once(&ei->vfs_inode);
4108 void btrfs_destroy_cachep(void)
4110 if (btrfs_inode_cachep)
4111 kmem_cache_destroy(btrfs_inode_cachep);
4112 if (btrfs_trans_handle_cachep)
4113 kmem_cache_destroy(btrfs_trans_handle_cachep);
4114 if (btrfs_transaction_cachep)
4115 kmem_cache_destroy(btrfs_transaction_cachep);
4116 if (btrfs_bit_radix_cachep)
4117 kmem_cache_destroy(btrfs_bit_radix_cachep);
4118 if (btrfs_path_cachep)
4119 kmem_cache_destroy(btrfs_path_cachep);
4122 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
4123 unsigned long extra_flags,
4124 void (*ctor)(void *))
4126 return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
4127 SLAB_MEM_SPREAD | extra_flags), ctor);
4130 int btrfs_init_cachep(void)
4132 btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
4133 sizeof(struct btrfs_inode),
4135 if (!btrfs_inode_cachep)
4137 btrfs_trans_handle_cachep =
4138 btrfs_cache_create("btrfs_trans_handle_cache",
4139 sizeof(struct btrfs_trans_handle),
4141 if (!btrfs_trans_handle_cachep)
4143 btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
4144 sizeof(struct btrfs_transaction),
4146 if (!btrfs_transaction_cachep)
4148 btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
4149 sizeof(struct btrfs_path),
4151 if (!btrfs_path_cachep)
4153 btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
4154 SLAB_DESTROY_BY_RCU, NULL);
4155 if (!btrfs_bit_radix_cachep)
4159 btrfs_destroy_cachep();
4163 static int btrfs_getattr(struct vfsmount *mnt,
4164 struct dentry *dentry, struct kstat *stat)
4166 struct inode *inode = dentry->d_inode;
4167 generic_fillattr(inode, stat);
4168 stat->blksize = PAGE_CACHE_SIZE;
4169 stat->blocks = (inode_get_bytes(inode) +
4170 BTRFS_I(inode)->delalloc_bytes) >> 9;
4174 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4175 struct inode *new_dir, struct dentry *new_dentry)
4177 struct btrfs_trans_handle *trans;
4178 struct btrfs_root *root = BTRFS_I(old_dir)->root;
4179 struct inode *new_inode = new_dentry->d_inode;
4180 struct inode *old_inode = old_dentry->d_inode;
4181 struct timespec ctime = CURRENT_TIME;
4185 if (S_ISDIR(old_inode->i_mode) && new_inode &&
4186 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
4190 ret = btrfs_check_free_space(root, 1, 0);
4194 trans = btrfs_start_transaction(root, 1);
4196 btrfs_set_trans_block_group(trans, new_dir);
4198 btrfs_inc_nlink(old_dentry->d_inode);
4199 old_dir->i_ctime = old_dir->i_mtime = ctime;
4200 new_dir->i_ctime = new_dir->i_mtime = ctime;
4201 old_inode->i_ctime = ctime;
4203 ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
4204 old_dentry->d_name.name,
4205 old_dentry->d_name.len);
4210 new_inode->i_ctime = CURRENT_TIME;
4211 ret = btrfs_unlink_inode(trans, root, new_dir,
4212 new_dentry->d_inode,
4213 new_dentry->d_name.name,
4214 new_dentry->d_name.len);
4217 if (new_inode->i_nlink == 0) {
4218 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
4224 ret = btrfs_set_inode_index(new_dir, old_inode, &index);
4228 ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
4229 old_inode, new_dentry->d_name.name,
4230 new_dentry->d_name.len, 1, index);
4235 btrfs_end_transaction_throttle(trans, root);
4241 * some fairly slow code that needs optimization. This walks the list
4242 * of all the inodes with pending delalloc and forces them to disk.
4244 int btrfs_start_delalloc_inodes(struct btrfs_root *root)
4246 struct list_head *head = &root->fs_info->delalloc_inodes;
4247 struct btrfs_inode *binode;
4248 struct inode *inode;
4249 unsigned long flags;
4251 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
4252 while (!list_empty(head)) {
4253 binode = list_entry(head->next, struct btrfs_inode,
4255 inode = igrab(&binode->vfs_inode);
4257 list_del_init(&binode->delalloc_inodes);
4258 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
4260 filemap_flush(inode->i_mapping);
4264 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
4266 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
4268 /* the filemap_flush will queue IO into the worker threads, but
4269 * we have to make sure the IO is actually started and that
4270 * ordered extents get created before we return
4272 atomic_inc(&root->fs_info->async_submit_draining);
4273 while (atomic_read(&root->fs_info->nr_async_submits)) {
4274 wait_event(root->fs_info->async_submit_wait,
4275 (atomic_read(&root->fs_info->nr_async_submits) == 0));
4277 atomic_dec(&root->fs_info->async_submit_draining);
4281 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
4282 const char *symname)
4284 struct btrfs_trans_handle *trans;
4285 struct btrfs_root *root = BTRFS_I(dir)->root;
4286 struct btrfs_path *path;
4287 struct btrfs_key key;
4288 struct inode *inode = NULL;
4296 struct btrfs_file_extent_item *ei;
4297 struct extent_buffer *leaf;
4298 unsigned long nr = 0;
4300 name_len = strlen(symname) + 1;
4301 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
4302 return -ENAMETOOLONG;
4304 err = btrfs_check_free_space(root, 1, 0);
4308 trans = btrfs_start_transaction(root, 1);
4309 btrfs_set_trans_block_group(trans, dir);
4311 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4317 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4319 dentry->d_parent->d_inode->i_ino, objectid,
4320 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
4322 err = PTR_ERR(inode);
4326 err = btrfs_init_acl(inode, dir);
4332 btrfs_set_trans_block_group(trans, inode);
4333 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4337 inode->i_mapping->a_ops = &btrfs_aops;
4338 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4339 inode->i_fop = &btrfs_file_operations;
4340 inode->i_op = &btrfs_file_inode_operations;
4341 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4343 dir->i_sb->s_dirt = 1;
4344 btrfs_update_inode_block_group(trans, inode);
4345 btrfs_update_inode_block_group(trans, dir);
4349 path = btrfs_alloc_path();
4351 key.objectid = inode->i_ino;
4353 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
4354 datasize = btrfs_file_extent_calc_inline_size(name_len);
4355 err = btrfs_insert_empty_item(trans, root, path, &key,
4361 leaf = path->nodes[0];
4362 ei = btrfs_item_ptr(leaf, path->slots[0],
4363 struct btrfs_file_extent_item);
4364 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
4365 btrfs_set_file_extent_type(leaf, ei,
4366 BTRFS_FILE_EXTENT_INLINE);
4367 btrfs_set_file_extent_encryption(leaf, ei, 0);
4368 btrfs_set_file_extent_compression(leaf, ei, 0);
4369 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
4370 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
4372 ptr = btrfs_file_extent_inline_start(ei);
4373 write_extent_buffer(leaf, symname, ptr, name_len);
4374 btrfs_mark_buffer_dirty(leaf);
4375 btrfs_free_path(path);
4377 inode->i_op = &btrfs_symlink_inode_operations;
4378 inode->i_mapping->a_ops = &btrfs_symlink_aops;
4379 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4380 inode_set_bytes(inode, name_len);
4381 btrfs_i_size_write(inode, name_len - 1);
4382 err = btrfs_update_inode(trans, root, inode);
4387 nr = trans->blocks_used;
4388 btrfs_end_transaction_throttle(trans, root);
4391 inode_dec_link_count(inode);
4394 btrfs_btree_balance_dirty(root, nr);
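/*
 * Editor's note on the sizes above: name_len is strlen(symname) + 1, so
 * the inline extent stores the target including its trailing NUL, while
 * i_size is set to name_len - 1 == strlen(symname), which is what
 * readlink(2) reports.  E.g. symlink("abc", ...) stores 4 bytes inline
 * but shows i_size == 3.
 */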
4398 static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
4399 u64 alloc_hint, int mode)
4401 struct btrfs_trans_handle *trans;
4402 struct btrfs_root *root = BTRFS_I(inode)->root;
4403 struct btrfs_key ins;
4405 u64 cur_offset = start;
4406 u64 num_bytes = end - start;
4409 trans = btrfs_join_transaction(root, 1);
4411 btrfs_set_trans_block_group(trans, inode);
4413 while (num_bytes > 0) {
4414 alloc_size = min(num_bytes, root->fs_info->max_extent);
4415 ret = btrfs_reserve_extent(trans, root, alloc_size,
4416 root->sectorsize, 0, alloc_hint,
4422 ret = insert_reserved_file_extent(trans, inode,
4423 cur_offset, ins.objectid,
4424 ins.offset, ins.offset,
4425 ins.offset, 0, 0, 0,
4426 BTRFS_FILE_EXTENT_PREALLOC);
4428 num_bytes -= ins.offset;
4429 cur_offset += ins.offset;
4430 alloc_hint = ins.objectid + ins.offset;
4433 if (cur_offset > start) {
4434 inode->i_ctime = CURRENT_TIME;
4435 btrfs_set_flag(inode, PREALLOC);
4436 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4437 cur_offset > i_size_read(inode))
4438 btrfs_i_size_write(inode, cur_offset);
4439 ret = btrfs_update_inode(trans, root, inode);
4443 btrfs_end_transaction(trans, root);
4447 static long btrfs_fallocate(struct inode *inode, int mode,
4448 loff_t offset, loff_t len)
4455 u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
4456 struct extent_map *em;
4459 alloc_start = offset & ~mask;
4460 alloc_end = (offset + len + mask) & ~mask;
4462 mutex_lock(&inode->i_mutex);
4463 if (alloc_start > inode->i_size) {
4464 ret = btrfs_cont_expand(inode, alloc_start);
4470 struct btrfs_ordered_extent *ordered;
4471 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start,
4472 alloc_end - 1, GFP_NOFS);
4473 ordered = btrfs_lookup_first_ordered_extent(inode,
4476 ordered->file_offset + ordered->len > alloc_start &&
4477 ordered->file_offset < alloc_end) {
4478 btrfs_put_ordered_extent(ordered);
4479 unlock_extent(&BTRFS_I(inode)->io_tree,
4480 alloc_start, alloc_end - 1, GFP_NOFS);
4481 btrfs_wait_ordered_range(inode, alloc_start,
4482 alloc_end - alloc_start);
4485 btrfs_put_ordered_extent(ordered);
4490 cur_offset = alloc_start;
4492 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4493 alloc_end - cur_offset, 0);
4494 BUG_ON(IS_ERR(em) || !em);
4495 last_byte = min(extent_map_end(em), alloc_end);
4496 last_byte = (last_byte + mask) & ~mask;
4497 if (em->block_start == EXTENT_MAP_HOLE) {
4498 ret = prealloc_file_range(inode, cur_offset,
4499 last_byte, alloc_hint, mode);
4501 free_extent_map(em);
4505 if (em->block_start <= EXTENT_MAP_LAST_BYTE)
4506 alloc_hint = em->block_start;
4507 free_extent_map(em);
4509 cur_offset = last_byte;
4510 if (cur_offset >= alloc_end) {
4515 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, alloc_end - 1,
4518 mutex_unlock(&inode->i_mutex);
4522 static int btrfs_set_page_dirty(struct page *page)
4524 return __set_page_dirty_nobuffers(page);
4527 static int btrfs_permission(struct inode *inode, int mask)
4529 if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
4531 return generic_permission(inode, mask, btrfs_check_acl);
4534 static struct inode_operations btrfs_dir_inode_operations = {
4535 .lookup = btrfs_lookup,
4536 .create = btrfs_create,
4537 .unlink = btrfs_unlink,
4539 .mkdir = btrfs_mkdir,
4540 .rmdir = btrfs_rmdir,
4541 .rename = btrfs_rename,
4542 .symlink = btrfs_symlink,
4543 .setattr = btrfs_setattr,
4544 .mknod = btrfs_mknod,
4545 .setxattr = btrfs_setxattr,
4546 .getxattr = btrfs_getxattr,
4547 .listxattr = btrfs_listxattr,
4548 .removexattr = btrfs_removexattr,
4549 .permission = btrfs_permission,
4551 static struct inode_operations btrfs_dir_ro_inode_operations = {
4552 .lookup = btrfs_lookup,
4553 .permission = btrfs_permission,
4555 static struct file_operations btrfs_dir_file_operations = {
4556 .llseek = generic_file_llseek,
4557 .read = generic_read_dir,
4558 .readdir = btrfs_real_readdir,
4559 .unlocked_ioctl = btrfs_ioctl,
4560 #ifdef CONFIG_COMPAT
4561 .compat_ioctl = btrfs_ioctl,
4563 .release = btrfs_release_file,
4564 .fsync = btrfs_sync_file,
4567 static struct extent_io_ops btrfs_extent_io_ops = {
4568 .fill_delalloc = run_delalloc_range,
4569 .submit_bio_hook = btrfs_submit_bio_hook,
4570 .merge_bio_hook = btrfs_merge_bio_hook,
4571 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
4572 .writepage_end_io_hook = btrfs_writepage_end_io_hook,
4573 .writepage_start_hook = btrfs_writepage_start_hook,
4574 .readpage_io_failed_hook = btrfs_io_failed_hook,
4575 .set_bit_hook = btrfs_set_bit_hook,
4576 .clear_bit_hook = btrfs_clear_bit_hook,
4579 static struct address_space_operations btrfs_aops = {
4580 .readpage = btrfs_readpage,
4581 .writepage = btrfs_writepage,
4582 .writepages = btrfs_writepages,
4583 .readpages = btrfs_readpages,
4584 .sync_page = block_sync_page,
4586 .direct_IO = btrfs_direct_IO,
4587 .invalidatepage = btrfs_invalidatepage,
4588 .releasepage = btrfs_releasepage,
4589 .set_page_dirty = btrfs_set_page_dirty,
4592 static struct address_space_operations btrfs_symlink_aops = {
4593 .readpage = btrfs_readpage,
4594 .writepage = btrfs_writepage,
4595 .invalidatepage = btrfs_invalidatepage,
4596 .releasepage = btrfs_releasepage,
4599 static struct inode_operations btrfs_file_inode_operations = {
4600 .truncate = btrfs_truncate,
4601 .getattr = btrfs_getattr,
4602 .setattr = btrfs_setattr,
4603 .setxattr = btrfs_setxattr,
4604 .getxattr = btrfs_getxattr,
4605 .listxattr = btrfs_listxattr,
4606 .removexattr = btrfs_removexattr,
4607 .permission = btrfs_permission,
4608 .fallocate = btrfs_fallocate,
4610 static struct inode_operations btrfs_special_inode_operations = {
4611 .getattr = btrfs_getattr,
4612 .setattr = btrfs_setattr,
4613 .permission = btrfs_permission,
4614 .setxattr = btrfs_setxattr,
4615 .getxattr = btrfs_getxattr,
4616 .listxattr = btrfs_listxattr,
4617 .removexattr = btrfs_removexattr,
4619 static struct inode_operations btrfs_symlink_inode_operations = {
4620 .readlink = generic_readlink,
4621 .follow_link = page_follow_link_light,
4622 .put_link = page_put_link,
4623 .permission = btrfs_permission,