/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "ref-cache.h"
#include "tree-log.h"

static int total_trans = 0;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;

#define BTRFS_ROOT_TRANS_TAG 0

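/*
 * drop a reference on a transaction.  When the last reference goes away
 * the transaction is removed from the global list and freed back to the
 * slab cache.
 */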
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(transaction->use_count == 0);
        transaction->use_count--;
        if (transaction->use_count == 0) {
                WARN_ON(total_trans == 0);
                total_trans--;
                list_del_init(&transaction->list);
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;
        cur_trans = root->fs_info->running_transaction;
        if (!cur_trans) {
                cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
                                             GFP_NOFS);
                total_trans++;
                BUG_ON(!cur_trans);
                root->fs_info->generation++;
                root->fs_info->last_alloc = 0;
                root->fs_info->last_data_alloc = 0;
                root->fs_info->last_log_alloc = 0;
                cur_trans->num_writers = 1;
                cur_trans->num_joined = 0;
                cur_trans->transid = root->fs_info->generation;
                init_waitqueue_head(&cur_trans->writer_wait);
                init_waitqueue_head(&cur_trans->commit_wait);
                cur_trans->in_commit = 0;
                cur_trans->blocked = 0;
                cur_trans->use_count = 1;
                cur_trans->commit_done = 0;
                cur_trans->start_time = get_seconds();
                INIT_LIST_HEAD(&cur_trans->pending_snapshots);
                list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
                extent_io_tree_init(&cur_trans->dirty_pages,
                                     root->fs_info->btree_inode->i_mapping,
                                     GFP_NOFS);
                spin_lock(&root->fs_info->new_trans_lock);
                root->fs_info->running_transaction = cur_trans;
                spin_unlock(&root->fs_info->new_trans_lock);
        } else {
                cur_trans->num_writers++;
                cur_trans->num_joined++;
        }

        return 0;
}

/*
 * this does all the record keeping required to make sure that a
 * reference counted root is properly recorded in a given transaction.
 * This is required to make sure the old root from before we joined the
 * transaction is deleted when the transaction commits
 */
noinline int btrfs_record_root_in_trans(struct btrfs_root *root)
{
        struct btrfs_dirty_root *dirty;
        u64 running_trans_id = root->fs_info->running_transaction->transid;
        if (root->ref_cows && root->last_trans < running_trans_id) {
                WARN_ON(root == root->fs_info->extent_root);
                if (root->root_item.refs != 0) {
                        radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_TRANS_TAG);

                        dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
                        BUG_ON(!dirty);
                        dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS);
                        BUG_ON(!dirty->root);
                        dirty->latest_root = root;
                        INIT_LIST_HEAD(&dirty->list);

                        root->commit_root = btrfs_root_node(root);

                        memcpy(dirty->root, root, sizeof(*root));
                        spin_lock_init(&dirty->root->node_lock);
                        spin_lock_init(&dirty->root->list_lock);
                        mutex_init(&dirty->root->objectid_mutex);
                        mutex_init(&dirty->root->log_mutex);
                        INIT_LIST_HEAD(&dirty->root->dead_list);
                        dirty->root->node = root->commit_root;
                        dirty->root->commit_root = NULL;

                        spin_lock(&root->list_lock);
                        list_add(&dirty->root->dead_list, &root->dead_list);
                        spin_unlock(&root->list_lock);

                        root->dirty_root = dirty;
                } else {
                        WARN_ON(1);
                }
                root->last_trans = running_trans_id;
        }
        return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;

        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && cur_trans->blocked) {
                DEFINE_WAIT(wait);
                cur_trans->use_count++;
                while(1) {
                        prepare_to_wait(&root->fs_info->transaction_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (cur_trans->blocked) {
                                mutex_unlock(&root->fs_info->trans_mutex);
                                schedule();
                                mutex_lock(&root->fs_info->trans_mutex);
                                finish_wait(&root->fs_info->transaction_wait,
                                            &wait);
                        } else {
                                finish_wait(&root->fs_info->transaction_wait,
                                            &wait);
                                break;
                        }
                }
                put_transaction(cur_trans);
        }
}

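/*
 * start or join a transaction.  The wait parameter controls how hard we
 * try to avoid joining a transaction that is blocked for commit:
 *
 * 0 - join the running transaction without waiting (btrfs_join_transaction)
 * 1 - wait for a blocked commit unless a userland ioctl transaction is
 *     open (btrfs_start_transaction)
 * 2 - always wait for a blocked commit (btrfs_start_ioctl_transaction)
 *
 * We never wait while the tree log is being replayed so that recovery
 * can make progress.
 */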
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
                                             int num_blocks, int wait)
{
        struct btrfs_trans_handle *h =
                kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        int ret;

        mutex_lock(&root->fs_info->trans_mutex);
        if (!root->fs_info->log_root_recovering &&
            ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2))
                wait_current_trans(root);
        ret = join_transaction(root);
        BUG_ON(ret);

        btrfs_record_root_in_trans(root);
        h->transid = root->fs_info->running_transaction->transid;
        h->transaction = root->fs_info->running_transaction;
        h->blocks_reserved = num_blocks;
        h->blocks_used = 0;
        h->block_group = NULL;
        h->alloc_exclude_nr = 0;
        h->alloc_exclude_start = 0;
        root->fs_info->running_transaction->use_count++;
        mutex_unlock(&root->fs_info->trans_mutex);
        return h;
}

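/*
 * the helpers below only differ in how aggressively they wait for a
 * blocked commit before joining (see start_transaction above)
 */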
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_blocks)
{
        return start_transaction(root, num_blocks, 1);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
                                                   int num_blocks)
{
        return start_transaction(root, num_blocks, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
                                                         int num_blocks)
{
        return start_transaction(r, num_blocks, 2);
}

/* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
{
        DEFINE_WAIT(wait);
        mutex_lock(&root->fs_info->trans_mutex);
        while(!commit->commit_done) {
                prepare_to_wait(&commit->commit_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (commit->commit_done)
                        break;
                mutex_unlock(&root->fs_info->trans_mutex);
                schedule();
                mutex_lock(&root->fs_info->trans_mutex);
        }
        mutex_unlock(&root->fs_info->trans_mutex);
        finish_wait(&commit->commit_wait, &wait);
        return 0;
}

/*
 * rate limit against the drop_snapshot code.  This helps to slow down new
 * operations if the drop_snapshot code isn't able to keep up.
 */
static void throttle_on_drops(struct btrfs_root *root)
{
        struct btrfs_fs_info *info = root->fs_info;
        int harder_count = 0;

harder:
        if (atomic_read(&info->throttles)) {
                DEFINE_WAIT(wait);
                int thr;
                thr = atomic_read(&info->throttle_gen);

                do {
                        prepare_to_wait(&info->transaction_throttle,
                                        &wait, TASK_UNINTERRUPTIBLE);
                        if (!atomic_read(&info->throttles)) {
                                finish_wait(&info->transaction_throttle, &wait);
                                break;
                        }
                        schedule();
                        finish_wait(&info->transaction_throttle, &wait);
                } while (thr == atomic_read(&info->throttle_gen));
                harder_count++;

                if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
                    harder_count < 2)
                        goto harder;

                if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
                    harder_count < 10)
                        goto harder;

                if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
                    harder_count < 20)
                        goto harder;
        }
}

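/*
 * throttling entry point for writers outside of a transaction handle.
 * This waits for any blocked commit to finish (unless a userland ioctl
 * transaction is open) and then rate limits against snapshot deletion.
 */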
void btrfs_throttle(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->trans_mutex);
        if (!root->fs_info->open_ioctl_trans)
                wait_current_trans(root);
        mutex_unlock(&root->fs_info->trans_mutex);

        throttle_on_drops(root);
}

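/*
 * common code to end a transaction handle.  The writer count on the
 * running transaction is dropped, anyone waiting on the writer count is
 * woken, and the handle is freed.  When throttle is set we also slow
 * down against pending snapshot deletions.
 */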
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, int throttle)
{
        struct btrfs_transaction *cur_trans;
        struct btrfs_fs_info *info = root->fs_info;

        mutex_lock(&info->trans_mutex);
        cur_trans = info->running_transaction;
        WARN_ON(cur_trans != trans->transaction);
        WARN_ON(cur_trans->num_writers < 1);
        cur_trans->num_writers--;

        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        put_transaction(cur_trans);
        mutex_unlock(&info->trans_mutex);
        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        if (throttle)
                throttle_on_drops(root);

        return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
                                        struct extent_io_tree *dirty_pages)
{
        int ret;
        int err = 0;
        int werr = 0;
        struct page *page;
        struct inode *btree_inode = root->fs_info->btree_inode;
        u64 start = 0;
        u64 end;
        unsigned long index;

        while(1) {
                ret = find_first_extent_bit(dirty_pages, start, &start, &end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;
                while(start <= end) {
                        cond_resched();

                        index = start >> PAGE_CACHE_SHIFT;
                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
                        page = find_get_page(btree_inode->i_mapping, index);
                        if (!page)
                                continue;

                        btree_lock_page_hook(page);
                        if (!page->mapping) {
                                unlock_page(page);
                                page_cache_release(page);
                                continue;
                        }

                        if (PageWriteback(page)) {
                                if (PageDirty(page))
                                        wait_on_page_writeback(page);
                                else {
                                        unlock_page(page);
                                        page_cache_release(page);
                                        continue;
                                }
                        }
                        err = write_one_page(page, 0);
                        if (err)
                                werr = err;
                        page_cache_release(page);
                }
        }
        while(1) {
                ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;

                clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
                while(start <= end) {
                        index = start >> PAGE_CACHE_SHIFT;
                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
                        page = find_get_page(btree_inode->i_mapping, index);
                        if (!page)
                                continue;
                        if (PageDirty(page)) {
                                btree_lock_page_hook(page);
                                wait_on_page_writeback(page);
                                err = write_one_page(page, 0);
                                if (err)
                                        werr = err;
                        }
                        wait_on_page_writeback(page);
                        page_cache_release(page);
                        cond_resched();
                }
        }
        if (err)
                werr = err;
        return werr;
}

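/*
 * write and wait on all the dirty btree blocks that belong to this
 * transaction.  If there is no running transaction the whole btree inode
 * is flushed instead.
 */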
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        if (!trans || !trans->transaction) {
                struct inode *btree_inode;
                btree_inode = root->fs_info->btree_inode;
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        return btrfs_write_and_wait_marked_extents(root,
                                           &trans->transaction->dirty_pages);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        struct btrfs_root *tree_root = root->fs_info->tree_root;

        btrfs_write_dirty_block_groups(trans, root);
        while(1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start)
                        break;
                btrfs_set_root_bytenr(&root->root_item,
                                       root->node->start);
                btrfs_set_root_level(&root->root_item,
                                     btrfs_header_level(root->node));
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                BUG_ON(ret);
                btrfs_write_dirty_block_groups(trans, root);
        }
        return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *next;

        while(!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);
                update_cowonly_root(trans, root);
        }
        return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest)
{
        struct btrfs_dirty_root *dirty;

        dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
        if (!dirty)
                return -ENOMEM;
        dirty->root = root;
        dirty->latest_root = latest;

        mutex_lock(&root->fs_info->trans_mutex);
        list_add(&dirty->list, &latest->fs_info->dead_roots);
        mutex_unlock(&root->fs_info->trans_mutex);
        return 0;
}

/*
 * at transaction commit time we need to schedule the old roots for
 * deletion via btrfs_drop_snapshot.  This runs through all the
 * reference counted roots that were modified in the current
 * transaction and puts them into the drop list
 */
static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
                                    struct radix_tree_root *radix,
                                    struct list_head *list)
{
        struct btrfs_dirty_root *dirty;
        struct btrfs_root *gang[8];
        struct btrfs_root *root;
        int i;
        int ret;
        int err = 0;
        u32 refs;

        while(1) {
                ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        radix_tree_tag_clear(radix,
                                     (unsigned long)root->root_key.objectid,
                                     BTRFS_ROOT_TRANS_TAG);

                        BUG_ON(!root->ref_tree);
                        dirty = root->dirty_root;

                        btrfs_free_log(trans, root);
                        btrfs_free_reloc_root(root);

                        if (root->commit_root == root->node) {
                                WARN_ON(root->node->start !=
                                        btrfs_root_bytenr(&root->root_item));

                                free_extent_buffer(root->commit_root);
                                root->commit_root = NULL;
                                root->dirty_root = NULL;

                                spin_lock(&root->list_lock);
                                list_del_init(&dirty->root->dead_list);
                                spin_unlock(&root->list_lock);

                                kfree(dirty->root);
                                kfree(dirty);

                                /* make sure to update the root on disk
                                 * so we get any updates to the block used
                                 * counts
                                 */
                                err = btrfs_update_root(trans,
                                                root->fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                                continue;
                        }

                        memset(&root->root_item.drop_progress, 0,
                               sizeof(struct btrfs_disk_key));
                        root->root_item.drop_level = 0;
                        root->commit_root = NULL;
                        root->dirty_root = NULL;
                        root->root_key.offset = root->fs_info->generation;
                        btrfs_set_root_bytenr(&root->root_item,
                                              root->node->start);
                        btrfs_set_root_level(&root->root_item,
                                             btrfs_header_level(root->node));
                        err = btrfs_insert_root(trans, root->fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        if (err)
                                break;

                        refs = btrfs_root_refs(&dirty->root->root_item);
                        btrfs_set_root_refs(&dirty->root->root_item, refs - 1);
                        err = btrfs_update_root(trans, root->fs_info->tree_root,
                                                &dirty->root->root_key,
                                                &dirty->root->root_item);

                        BUG_ON(err);
                        if (refs == 1) {
                                list_add(&dirty->list, list);
                        } else {
                                WARN_ON(1);
                                free_extent_buffer(dirty->root->node);
                                kfree(dirty->root);
                                kfree(dirty);
                        }
                }
        }
        return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
        struct btrfs_fs_info *info = root->fs_info;
        int ret;
        struct btrfs_trans_handle *trans;
        unsigned long nr;

        smp_mb();
        if (root->defrag_running)
                return 0;
        trans = btrfs_start_transaction(root, 1);
        while (1) {
                root->defrag_running = 1;
                ret = btrfs_defrag_leaves(trans, root, cacheonly);
                nr = trans->blocks_used;
                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root, nr);
                cond_resched();

                trans = btrfs_start_transaction(root, 1);
                if (root->fs_info->closing || ret != -EAGAIN)
                        break;
        }
        root->defrag_running = 0;
        smp_mb();
        btrfs_end_transaction(trans, root);
        return 0;
}

/*
 * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
 * all of them
 */
static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
                                     struct list_head *list)
{
        struct btrfs_dirty_root *dirty;
        struct btrfs_trans_handle *trans;
        unsigned long nr;
        u64 num_bytes;
        u64 bytes_used;
        u64 max_useless;
        int ret = 0;
        int err;

        while(!list_empty(list)) {
                struct btrfs_root *root;

                dirty = list_entry(list->prev, struct btrfs_dirty_root, list);
                list_del_init(&dirty->list);

                num_bytes = btrfs_root_used(&dirty->root->root_item);
                root = dirty->latest_root;
                atomic_inc(&root->fs_info->throttles);

                while(1) {
                        trans = btrfs_start_transaction(tree_root, 1);
                        mutex_lock(&root->fs_info->drop_mutex);
                        ret = btrfs_drop_snapshot(trans, dirty->root);
                        if (ret != -EAGAIN) {
                                break;
                        }
                        mutex_unlock(&root->fs_info->drop_mutex);

                        err = btrfs_update_root(trans,
                                        tree_root,
                                        &dirty->root->root_key,
                                        &dirty->root->root_item);
                        if (err)
                                ret = err;
                        nr = trans->blocks_used;
                        ret = btrfs_end_transaction(trans, tree_root);
                        BUG_ON(ret);

                        btrfs_btree_balance_dirty(tree_root, nr);
                        cond_resched();
                }
                BUG_ON(ret);
                atomic_dec(&root->fs_info->throttles);
                wake_up(&root->fs_info->transaction_throttle);

                mutex_lock(&root->fs_info->alloc_mutex);
                num_bytes -= btrfs_root_used(&dirty->root->root_item);
                bytes_used = btrfs_root_used(&root->root_item);
                if (num_bytes) {
                        btrfs_record_root_in_trans(root);
                        btrfs_set_root_used(&root->root_item,
                                            bytes_used - num_bytes);
                }
                mutex_unlock(&root->fs_info->alloc_mutex);

                ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
                if (ret) {
                        BUG();
                        break;
                }
                mutex_unlock(&root->fs_info->drop_mutex);

                spin_lock(&root->list_lock);
                list_del_init(&dirty->root->dead_list);
                if (!list_empty(&root->dead_list)) {
                        struct btrfs_root *oldest;
                        oldest = list_entry(root->dead_list.prev,
                                            struct btrfs_root, dead_list);
                        max_useless = oldest->root_key.offset - 1;
                } else {
                        max_useless = root->root_key.offset - 1;
                }
                spin_unlock(&root->list_lock);

                nr = trans->blocks_used;
                ret = btrfs_end_transaction(trans, tree_root);
                BUG_ON(ret);

                ret = btrfs_remove_leaf_refs(root, max_useless, 0);
                BUG_ON(ret);

                free_extent_buffer(dirty->root->node);
                kfree(dirty->root);
                kfree(dirty);

                btrfs_btree_balance_dirty(tree_root, nr);
                cond_resched();
        }
        return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        int ret;
        int namelen;
        u64 objectid;

        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                ret = -ENOMEM;
                goto fail;
        }
        ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
        if (ret)
                goto fail;

        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

        key.objectid = objectid;
        key.offset = trans->transid;
        btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

        old = btrfs_lock_root_node(root);
        btrfs_cow_block(trans, root, old, NULL, 0, &old, 0);

        btrfs_copy_root(trans, root, old, &tmp, objectid);
        btrfs_tree_unlock(old);
        free_extent_buffer(old);

        btrfs_set_root_bytenr(new_root_item, tmp->start);
        btrfs_set_root_level(new_root_item, btrfs_header_level(tmp));
        ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
                                new_root_item);
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        if (ret)
                goto fail;

        /*
         * insert the directory item
         */
        key.offset = (u64)-1;
        namelen = strlen(pending->name);
        ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
                                    pending->name, namelen,
                                    root->fs_info->sb->s_root->d_inode->i_ino,
                                    &key, BTRFS_FT_DIR, 0);

        if (ret)
                goto fail;

        ret = btrfs_insert_inode_ref(trans, root->fs_info->tree_root,
                             pending->name, strlen(pending->name), objectid,
                             root->fs_info->sb->s_root->d_inode->i_ino, 0);

        /* Invalidate existing dcache entry for new snapshot. */
        btrfs_invalidate_dcache_root(root, pending->name, namelen);

fail:
        kfree(new_root_item);
        return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending;
        struct list_head *head = &trans->transaction->pending_snapshots;
        int ret;

        while(!list_empty(head)) {
                pending = list_entry(head->next,
                                     struct btrfs_pending_snapshot, list);
                ret = create_pending_snapshot(trans, fs_info, pending);
                BUG_ON(ret);
                list_del(&pending->list);
                kfree(pending->name);
                kfree(pending);
        }
        return 0;
}

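/*
 * commit the currently running transaction.  This blocks new writers,
 * waits for the existing ones to finish, creates any pending snapshots,
 * writes out all the dirty tree blocks and then the super blocks, and
 * finally wakes anyone waiting on the commit.  If another commit of the
 * same transaction is already in progress we simply wait for it.
 */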
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root)
{
        unsigned long joined = 0;
        unsigned long timeout = 1;
        struct btrfs_transaction *cur_trans;
        struct btrfs_transaction *prev_trans = NULL;
        struct btrfs_root *chunk_root = root->fs_info->chunk_root;
        struct list_head dirty_fs_roots;
        struct extent_io_tree *pinned_copy;
        DEFINE_WAIT(wait);
        int ret;

        INIT_LIST_HEAD(&dirty_fs_roots);
        mutex_lock(&root->fs_info->trans_mutex);
        if (trans->transaction->in_commit) {
                cur_trans = trans->transaction;
                trans->transaction->use_count++;
                mutex_unlock(&root->fs_info->trans_mutex);
                btrfs_end_transaction(trans, root);

                ret = wait_for_commit(root, cur_trans);
                BUG_ON(ret);

                mutex_lock(&root->fs_info->trans_mutex);
                put_transaction(cur_trans);
                mutex_unlock(&root->fs_info->trans_mutex);

                return 0;
        }

        pinned_copy = kmalloc(sizeof(*pinned_copy), GFP_NOFS);
        if (!pinned_copy)
                return -ENOMEM;

        extent_io_tree_init(pinned_copy,
                             root->fs_info->btree_inode->i_mapping, GFP_NOFS);

        trans->transaction->in_commit = 1;
        trans->transaction->blocked = 1;
        cur_trans = trans->transaction;
        if (cur_trans->list.prev != &root->fs_info->trans_list) {
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (!prev_trans->commit_done) {
                        prev_trans->use_count++;
                        mutex_unlock(&root->fs_info->trans_mutex);

                        wait_for_commit(root, prev_trans);

                        mutex_lock(&root->fs_info->trans_mutex);
                        put_transaction(prev_trans);
                }
        }

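        /* wait for all the other writers on this transaction to finish.
         * If new writers come in while we sleep, loop again so their
         * changes are included in this commit.  When snapshots are
         * pending we also wait on ordered data extents before sleeping.
         */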
        do {
                int snap_pending = 0;
                joined = cur_trans->num_joined;
                if (!list_empty(&trans->transaction->pending_snapshots))
                        snap_pending = 1;

                WARN_ON(cur_trans != trans->transaction);
                prepare_to_wait(&cur_trans->writer_wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                if (cur_trans->num_writers > 1)
                        timeout = MAX_SCHEDULE_TIMEOUT;
                else
                        timeout = 1;

                mutex_unlock(&root->fs_info->trans_mutex);

                if (snap_pending) {
                        ret = btrfs_wait_ordered_extents(root, 1);
                        BUG_ON(ret);
                }

                schedule_timeout(timeout);

                mutex_lock(&root->fs_info->trans_mutex);
                finish_wait(&cur_trans->writer_wait, &wait);
        } while (cur_trans->num_writers > 1 ||
                 (cur_trans->num_joined != joined));

        ret = create_pending_snapshots(trans, root->fs_info);
        BUG_ON(ret);

        WARN_ON(cur_trans != trans->transaction);

        /* btrfs_commit_tree_roots is responsible for getting the
         * various roots consistent with each other.  Every pointer
         * in the tree of tree roots has to point to the most up to date
         * root for every subvolume and other tree.  So, we have to keep
         * the tree logging code from jumping in and changing any
         * of the trees.
         *
         * At this point in the commit, there can't be any tree-log
         * writers, but a little lower down we drop the trans mutex
         * and let new people in.  By holding the tree_log_mutex
         * from now until after the super is written, we avoid races
         * with the tree-log code.
         */
        mutex_lock(&root->fs_info->tree_log_mutex);
        /*
         * keep tree reloc code from adding new reloc trees
         */
        mutex_lock(&root->fs_info->tree_reloc_mutex);

        ret = add_dirty_roots(trans, &root->fs_info->fs_roots_radix,
                              &dirty_fs_roots);
        BUG_ON(ret);

        /* add_dirty_roots gets rid of all the tree log roots; it is now
         * safe to free the root of tree log roots
         */
        btrfs_free_log_root_tree(trans, root->fs_info);

        btrfs_free_reloc_mappings(root);

        ret = btrfs_commit_tree_roots(trans, root);
        BUG_ON(ret);

        cur_trans = root->fs_info->running_transaction;
        spin_lock(&root->fs_info->new_trans_lock);
        root->fs_info->running_transaction = NULL;
        spin_unlock(&root->fs_info->new_trans_lock);
        btrfs_set_super_generation(&root->fs_info->super_copy,
                                   cur_trans->transid);
        btrfs_set_super_root(&root->fs_info->super_copy,
                             root->fs_info->tree_root->node->start);
        btrfs_set_super_root_level(&root->fs_info->super_copy,
                           btrfs_header_level(root->fs_info->tree_root->node));

        btrfs_set_super_chunk_root(&root->fs_info->super_copy,
                                   chunk_root->node->start);
        btrfs_set_super_chunk_root_level(&root->fs_info->super_copy,
                                         btrfs_header_level(chunk_root->node));

        if (!root->fs_info->log_root_recovering) {
                btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
                btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
        }

        memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
               sizeof(root->fs_info->super_copy));

        btrfs_copy_pinned(root, pinned_copy);

        trans->transaction->blocked = 0;
        wake_up(&root->fs_info->transaction_throttle);
        wake_up(&root->fs_info->transaction_wait);

        mutex_unlock(&root->fs_info->trans_mutex);
        ret = btrfs_write_and_wait_transaction(trans, root);
        BUG_ON(ret);
        write_ctree_super(trans, root);

        /*
         * the super is written, we can safely allow the tree-loggers
         * to go about their business
         */
        mutex_unlock(&root->fs_info->tree_log_mutex);

        btrfs_finish_extent_commit(trans, root, pinned_copy);
        kfree(pinned_copy);

        btrfs_drop_dead_reloc_roots(root);
        mutex_unlock(&root->fs_info->tree_reloc_mutex);

        mutex_lock(&root->fs_info->trans_mutex);

        cur_trans->commit_done = 1;
        root->fs_info->last_trans_committed = cur_trans->transid;
        wake_up(&cur_trans->commit_wait);
        put_transaction(cur_trans);
        put_transaction(cur_trans);

        list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);
        if (root->fs_info->closing)
                list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);

        mutex_unlock(&root->fs_info->trans_mutex);
        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        if (root->fs_info->closing) {
                drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
        }
        return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for
 * deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
        struct list_head dirty_roots;
        INIT_LIST_HEAD(&dirty_roots);
again:
        mutex_lock(&root->fs_info->trans_mutex);
        list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
        mutex_unlock(&root->fs_info->trans_mutex);

        if (!list_empty(&dirty_roots)) {
                drop_dirty_roots(root, &dirty_roots);
                goto again;
        }
        return 0;
}