/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"

#define BTRFS_ROOT_TRANS_TAG 0

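/*
 * drop a reference on @transaction; once the last reference is gone the
 * transaction must be off the global list and have no delayed refs left,
 * and it is freed back to the slab cache
 */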
void put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(atomic_read(&transaction->use_count) == 0);
        if (atomic_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(transaction->delayed_refs.root.rb_node);
                WARN_ON(!list_empty(&transaction->delayed_refs.seq_head));
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}

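/*
 * swap the commit root for the current root node, dropping our reference
 * on the old commit root
 */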
static noinline void switch_commit_root(struct btrfs_root *root)
{
        free_extent_buffer(root->commit_root);
        root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int nofail)
{
        struct btrfs_transaction *cur_trans;
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
loop:
        /* The file system has been taken offline. No new transactions. */
        if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                spin_unlock(&fs_info->trans_lock);
                return -EROFS;
        }

        if (fs_info->trans_no_join) {
                if (!nofail) {
                        spin_unlock(&fs_info->trans_lock);
                        return -EBUSY;
                }
        }

        cur_trans = fs_info->running_transaction;
        if (cur_trans) {
                if (cur_trans->aborted) {
                        spin_unlock(&fs_info->trans_lock);
                        return cur_trans->aborted;
                }
                atomic_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                cur_trans->num_joined++;
                spin_unlock(&fs_info->trans_lock);
                return 0;
        }
        spin_unlock(&fs_info->trans_lock);

        cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
        if (!cur_trans)
                return -ENOMEM;

        spin_lock(&fs_info->trans_lock);
        if (fs_info->running_transaction) {
                /*
                 * someone started a transaction after we unlocked.  Make sure
                 * to redo the trans_no_join checks above
                 */
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                cur_trans = fs_info->running_transaction;
                goto loop;
        } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                spin_unlock(&fs_info->trans_lock);
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                return -EROFS;
        }

        atomic_set(&cur_trans->num_writers, 1);
        cur_trans->num_joined = 0;
        init_waitqueue_head(&cur_trans->writer_wait);
        init_waitqueue_head(&cur_trans->commit_wait);
        cur_trans->in_commit = 0;
        cur_trans->blocked = 0;
        /*
         * One for this trans handle, one so it will live on until we
         * commit the transaction.
         */
        atomic_set(&cur_trans->use_count, 2);
        cur_trans->commit_done = 0;
        cur_trans->start_time = get_seconds();

        cur_trans->delayed_refs.root = RB_ROOT;
        cur_trans->delayed_refs.num_entries = 0;
        cur_trans->delayed_refs.num_heads_ready = 0;
        cur_trans->delayed_refs.num_heads = 0;
        cur_trans->delayed_refs.flushing = 0;
        cur_trans->delayed_refs.run_delayed_start = 0;
        cur_trans->delayed_refs.seq = 1;

        /*
         * although the tree mod log is per file system and not per
         * transaction, the log must never go across transaction boundaries.
         */
        smp_mb();
        if (!list_empty(&fs_info->tree_mod_seq_list)) {
                printk(KERN_ERR "btrfs: tree_mod_seq_list not empty when "
                        "creating a fresh transaction\n");
                WARN_ON(1);
        }
        if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) {
                printk(KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
                        "creating a fresh transaction\n");
                WARN_ON(1);
        }
        atomic_set(&fs_info->tree_mod_seq, 0);

        init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
        spin_lock_init(&cur_trans->commit_lock);
        spin_lock_init(&cur_trans->delayed_refs.lock);
        INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);

        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
                             fs_info->btree_inode->i_mapping);
        fs_info->generation++;
        cur_trans->transid = fs_info->generation;
        fs_info->running_transaction = cur_trans;
        cur_trans->aborted = 0;
        spin_unlock(&fs_info->trans_lock);

        return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (root->ref_cows && root->last_trans < trans->transid) {
                WARN_ON(root == root->fs_info->extent_root);
                WARN_ON(root->commit_root != root->node);

                /*
                 * see below for the in_trans_setup usage rules; we have
                 * the reloc mutex held now, so there is only one writer
                 * in this function
                 */
                root->in_trans_setup = 1;

                /* make sure readers find in_trans_setup before
                 * they find our root->last_trans update
                 */
                smp_wmb();

                spin_lock(&root->fs_info->fs_roots_radix_lock);
                if (root->last_trans == trans->transid) {
                        spin_unlock(&root->fs_info->fs_roots_radix_lock);
                        return 0;
                }
                radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                           (unsigned long)root->root_key.objectid,
                           BTRFS_ROOT_TRANS_TAG);
                spin_unlock(&root->fs_info->fs_roots_radix_lock);
                root->last_trans = trans->transid;

                /* this is pretty tricky.  We don't want to
                 * take the relocation lock in btrfs_record_root_in_trans
                 * unless we're really doing the first setup for this root in
                 * this transaction.
                 *
                 * Normally we'd use root->last_trans as a flag to decide
                 * if we want to take the expensive mutex.
                 *
                 * But, we have to set root->last_trans before we
                 * init the relocation root, otherwise, we trip over warnings
                 * in ctree.c.  The solution used here is to flag ourselves
                 * with root->in_trans_setup.  When this is 1, we're still
                 * fixing up the reloc trees and everyone must wait.
                 *
                 * When this is zero, they can trust root->last_trans and fly
                 * through btrfs_record_root_in_trans without having to take the
                 * lock.  smp_wmb() makes sure that all the writes above are
                 * done before we pop in the zero below
                 */
                btrfs_init_reloc_root(trans, root);
                smp_wmb();
                root->in_trans_setup = 0;
        }
        return 0;
}

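/*
 * record @root in the running transaction, taking the reloc mutex only
 * when this really is the first use of the root in this transaction.
 * The lockless fast path relies on the in_trans_setup dance described
 * in record_root_in_trans() above.
 */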
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (!root->ref_cows)
                return 0;

        /*
         * see record_root_in_trans for comments about in_trans_setup usage
         * and barriers
         */
        smp_rmb();
        if (root->last_trans == trans->transid &&
            !root->in_trans_setup)
                return 0;

        mutex_lock(&root->fs_info->reloc_mutex);
        record_root_in_trans(trans, root);
        mutex_unlock(&root->fs_info->reloc_mutex);

        return 0;
}

/* wait for commit against the current transaction to become unblocked.
 * When this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&root->fs_info->trans_lock);
        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && cur_trans->blocked) {
                atomic_inc(&cur_trans->use_count);
                spin_unlock(&root->fs_info->trans_lock);

                wait_event(root->fs_info->transaction_wait,
                           !cur_trans->blocked);
                put_transaction(cur_trans);
        } else {
                spin_unlock(&root->fs_info->trans_lock);
        }
}

enum btrfs_trans_type {
        TRANS_START,
        TRANS_JOIN,
        TRANS_USERSPACE,
        TRANS_JOIN_NOLOCK,
};

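/*
 * decide whether a caller of the given type must wait out a blocked
 * transaction commit before joining: userspace-started transactions
 * always do, and plain starts do unless an ioctl transaction is open.
 * Nobody waits while log recovery is running.
 */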
static int may_wait_transaction(struct btrfs_root *root, int type)
{
        if (root->fs_info->log_root_recovering)
                return 0;

        if (type == TRANS_USERSPACE)
                return 1;

        if (type == TRANS_START &&
            !atomic_read(&root->fs_info->open_ioctl_trans))
                return 1;

        return 0;
}

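/*
 * common helper behind the btrfs_*_transaction starters: reserve metadata
 * space for @num_items items if asked to, join or create the running
 * transaction and hand back a transaction handle.  A handle already held
 * by the current task (current->journal_info) is reused with its
 * use_count bumped instead.
 */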
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
                                                    u64 num_items, int type)
{
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
        u64 num_bytes = 0;
        int ret;

        if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
                return ERR_PTR(-EROFS);

        if (current->journal_info) {
                WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
                h = current->journal_info;
                h->use_count++;
                h->orig_rsv = h->block_rsv;
                h->block_rsv = NULL;
                goto got_it;
        }

        /*
         * Do the reservation before we join the transaction so we can do all
         * the appropriate flushing if need be.
         */
        if (num_items > 0 && root != root->fs_info->chunk_root) {
                num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
                ret = btrfs_block_rsv_add(root,
                                          &root->fs_info->trans_block_rsv,
                                          num_bytes);
                if (ret)
                        return ERR_PTR(ret);
        }
again:
        h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        if (!h)
                return ERR_PTR(-ENOMEM);

        if (may_wait_transaction(root, type))
                wait_current_trans(root);

        do {
                ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
                if (ret == -EBUSY)
                        wait_current_trans(root);
        } while (ret == -EBUSY);

        if (ret < 0) {
                kmem_cache_free(btrfs_trans_handle_cachep, h);
                return ERR_PTR(ret);
        }

        cur_trans = root->fs_info->running_transaction;

        h->transid = cur_trans->transid;
        h->transaction = cur_trans;
        h->blocks_used = 0;
        h->bytes_reserved = 0;
        h->delayed_ref_updates = 0;
        h->use_count = 1;
        h->block_rsv = NULL;
        h->orig_rsv = NULL;
        h->aborted = 0;

        smp_mb();
        if (cur_trans->blocked && may_wait_transaction(root, type)) {
                btrfs_commit_transaction(h, root);
                goto again;
        }

        if (num_bytes) {
                trace_btrfs_space_reservation(root->fs_info, "transaction",
                                              h->transid, num_bytes, 1);
                h->block_rsv = &root->fs_info->trans_block_rsv;
                h->bytes_reserved = num_bytes;
        }

got_it:
        btrfs_record_root_in_trans(h, root);

        if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;
        return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_items)
{
        return start_transaction(root, num_items, TRANS_START);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_USERSPACE);
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
{
        wait_event(commit->commit_wait, commit->commit_done);
}

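/*
 * wait for the transaction with the given transid to finish committing,
 * or, when @transid is zero, for whichever transaction is currently
 * committing.  Returns -EINVAL when a nonzero @transid cannot be found
 * on the running transaction list, 0 otherwise.
 */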
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
        struct btrfs_transaction *cur_trans = NULL, *t;
        int ret;

        ret = 0;
        if (transid) {
                if (transid <= root->fs_info->last_trans_committed)
                        goto out;

                /* find specified transaction */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry(t, &root->fs_info->trans_list, list) {
                        if (t->transid == transid) {
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
                                break;
                        }
                        if (t->transid > transid)
                                break;
                }
                spin_unlock(&root->fs_info->trans_lock);
                ret = -EINVAL;
                if (!cur_trans)
                        goto out;  /* bad transid */
        } else {
                /* find newest transaction that is committing | committed */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry_reverse(t, &root->fs_info->trans_list,
                                            list) {
                        if (t->in_commit) {
                                if (t->commit_done)
                                        break;
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
                                break;
                        }
                }
                spin_unlock(&root->fs_info->trans_lock);
                if (!cur_trans)
                        goto out;  /* nothing committing|committed */
        }

        wait_for_commit(root, cur_trans);

        put_transaction(cur_trans);
        ret = 0;
out:
        return ret;
}

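/*
 * throttle new callers while a commit is blocking joins, unless an
 * ioctl transaction is open
 */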
void btrfs_throttle(struct btrfs_root *root)
{
        if (!atomic_read(&root->fs_info->open_ioctl_trans))
                wait_current_trans(root);
}

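/*
 * returns 1 when the global block reserve is running low (as judged by
 * btrfs_block_rsv_check) and the transaction should be ended so space
 * can be reclaimed
 */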
static int should_end_transaction(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        int ret;

        ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
        return ret ? 1 : 0;
}

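/*
 * returns 1 when the current transaction should be committed soon,
 * either because it is blocked or flushing delayed refs, or because the
 * global reserve is low; runs any pending delayed ref updates along the
 * way
 */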
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_block_rsv *rsv = trans->block_rsv;
        int updates;
        int err;

        smp_mb();
        if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
                return 1;

        /*
         * We need to do this in case we're deleting csums so the global block
         * rsv gets used instead of the csum block rsv.
         */
        trans->block_rsv = NULL;

        updates = trans->delayed_ref_updates;
        trans->delayed_ref_updates = 0;
        if (updates) {
                err = btrfs_run_delayed_refs(trans, root, updates);
                if (err) /* Error code will also evaluate as true */
                        return err;
        }

        trans->block_rsv = rsv;

        return should_end_transaction(trans, root);
}

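/*
 * common tail for ending a transaction handle: run a batch of delayed
 * refs, kick off or join a commit when the transaction should end, drop
 * our writer count and free the handle.  Returns -EIO when the
 * transaction was aborted or the file system went into an error state.
 */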
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, int throttle, int lock)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *info = root->fs_info;
        int count = 0;
        int err = 0;

        if (--trans->use_count) {
                trans->block_rsv = trans->orig_rsv;
                return 0;
        }

        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;
        while (count < 2) {
                unsigned long cur = trans->delayed_ref_updates;
                trans->delayed_ref_updates = 0;
                if (cur &&
                    trans->transaction->delayed_refs.num_heads_ready > 64) {
                        trans->delayed_ref_updates = 0;
                        btrfs_run_delayed_refs(trans, root, cur);
                } else {
                        break;
                }
                count++;
        }

        if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
            should_end_transaction(trans, root)) {
                trans->transaction->blocked = 1;
                smp_wmb();
        }

        if (lock && cur_trans->blocked && !cur_trans->in_commit) {
                if (throttle) {
                        /*
                         * We may race with somebody else here and end up
                         * having to call end_transaction on ourselves again,
                         * so bump our use_count.
                         */
                        trans->use_count++;
                        return btrfs_commit_transaction(trans, root);
                } else {
                        wake_up_process(info->transaction_kthread);
                }
        }

        WARN_ON(cur_trans != info->running_transaction);
        WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
        atomic_dec(&cur_trans->num_writers);

        smp_mb();
        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        put_transaction(cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        if (throttle)
                btrfs_run_delayed_iputs(root);

        if (trans->aborted ||
            root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
                err = -EIO;
        }

        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        return err;
}

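/*
 * the __btrfs_end_transaction() wrappers below only differ in whether
 * they throttle the caller and whether joins are still allowed.  A
 * typical start/end pairing looks like this (sketch):
 *
 *      trans = btrfs_start_transaction(root, 1);
 *      if (IS_ERR(trans))
 *              return PTR_ERR(trans);
 *      ...modify up to one metadata item...
 *      btrfs_end_transaction(trans, root);
 */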
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        int ret;

        ret = __btrfs_end_transaction(trans, root, 0, 1);
        if (ret)
                return ret;
        return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        int ret;

        ret = __btrfs_end_transaction(trans, root, 1, 1);
        if (ret)
                return ret;
        return 0;
}

int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
{
        int ret;

        ret = __btrfs_end_transaction(trans, root, 0, 0);
        if (ret)
                return ret;
        return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
                               struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      mark)) {
                convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
                                   mark, GFP_NOFS);
                err = filemap_fdatawrite_range(mapping, start, end);
                if (err)
                        werr = err;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
                              struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      EXTENT_NEED_WAIT)) {
                clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT,
                                  GFP_NOFS);
                err = filemap_fdatawait_range(mapping, start, end);
                if (err)
                        werr = err;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
                                struct extent_io_tree *dirty_pages, int mark)
{
        int ret;
        int ret2;

        ret = btrfs_write_marked_extents(root, dirty_pages, mark);
        ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

        if (ret)
                return ret;
        if (ret2)
                return ret2;
        return 0;
}

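/*
 * write out and wait on all the dirty btree blocks of the transaction;
 * with no transaction given, fall back to flushing the whole btree inode
 */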
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        if (!trans || !trans->transaction) {
                struct inode *btree_inode;
                btree_inode = root->fs_info->btree_inode;
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        return btrfs_write_and_wait_marked_extents(root,
                                           &trans->transaction->dirty_pages,
                                           EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and makes sure the cowonly root didn't change while
 * the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_root *tree_root = root->fs_info->tree_root;

        old_root_used = btrfs_root_used(&root->root_item);
        btrfs_write_dirty_block_groups(trans, root);

        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
                    old_root_used == btrfs_root_used(&root->root_item))
                        break;

                btrfs_set_root_node(&root->root_item, root->node);
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                if (ret)
                        return ret;

                old_root_used = btrfs_root_used(&root->root_item);
                ret = btrfs_write_dirty_block_groups(trans, root);
                if (ret)
                        return ret;
        }

        if (root != root->fs_info->extent_root)
                switch_commit_root(root);

        return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;

        eb = btrfs_lock_root_node(fs_info->tree_root);
        ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
                              0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);

        if (ret)
                return ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;

        ret = btrfs_run_dev_stats(trans, root->fs_info);
        BUG_ON(ret);

        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);

                ret = update_cowonly_root(trans, root);
                if (ret)
                        return ret;
        }

        down_write(&fs_info->extent_commit_sem);
        switch_commit_root(fs_info->extent_root);
        up_write(&fs_info->extent_commit_sem);

        return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
        spin_lock(&root->fs_info->trans_lock);
        list_add(&root->root_list, &root->fs_info->dead_roots);
        spin_unlock(&root->fs_info->trans_lock);
        return 0;
}

/*
 * update all the fs-tree roots on disk, following the ones tagged in the
 * radix tree as modified in this transaction
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root)
{
        struct btrfs_root *gang[8];
        struct btrfs_fs_info *fs_info = root->fs_info;
        int i;
        int ret;
        int err = 0;

        spin_lock(&fs_info->fs_roots_radix_lock);
        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);
                        spin_unlock(&fs_info->fs_roots_radix_lock);

                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);
                        btrfs_orphan_commit_root(trans, root);

                        btrfs_save_ino_cache(root, trans);

                        /* see comments in should_cow_block() */
                        root->force_cow = 0;
                        smp_wmb();

                        if (root->commit_root != root->node) {
                                mutex_lock(&root->fs_commit_mutex);
                                switch_commit_root(root);
                                btrfs_unpin_free_ino(root);
                                mutex_unlock(&root->fs_commit_mutex);

                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }

                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        spin_lock(&fs_info->fs_roots_radix_lock);
                        if (err)
                                break;
                }
        }
        spin_unlock(&fs_info->fs_roots_radix_lock);
        return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_trans_handle *trans;
        int ret;
        unsigned long nr;

        if (xchg(&root->defrag_running, 1))
                return 0;

        while (1) {
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);

                ret = btrfs_defrag_leaves(trans, root, cacheonly);

                nr = trans->blocks_used;
                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root, nr);
                cond_resched();

                if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
                        break;
        }
        root->defrag_running = 0;
        return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct btrfs_root *parent_root;
        struct btrfs_block_rsv *rsv;
        struct inode *parent_inode;
        struct dentry *parent;
        struct dentry *dentry;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        int ret;
        u64 to_reserve = 0;
        u64 index = 0;
        u64 objectid;
        u64 root_flags;

        rsv = trans->block_rsv;

        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                ret = pending->error = -ENOMEM;
                goto fail;
        }

        ret = btrfs_find_free_objectid(tree_root, &objectid);
        if (ret) {
                pending->error = ret;
                goto fail;
        }

        btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

        if (to_reserve > 0) {
                ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
                                                  to_reserve);
                if (ret) {
                        pending->error = ret;
                        goto fail;
                }
        }

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_ROOT_ITEM_KEY;

        trans->block_rsv = &pending->block_rsv;

        dentry = pending->dentry;
        parent = dget_parent(dentry);
        parent_inode = parent->d_inode;
        parent_root = BTRFS_I(parent_inode)->root;
        record_root_in_trans(trans, parent_root);

        /*
         * insert the directory item
         */
        ret = btrfs_set_inode_index(parent_inode, &index);
        BUG_ON(ret); /* -ENOMEM */
        ret = btrfs_insert_dir_item(trans, parent_root,
                                dentry->d_name.name, dentry->d_name.len,
                                parent_inode, &key,
                                BTRFS_FT_DIR, index);
        if (ret == -EEXIST) {
                pending->error = -EEXIST;
                dput(parent);
                goto fail;
        } else if (ret) {
                goto abort_trans_dput;
        }

        btrfs_i_size_write(parent_inode, parent_inode->i_size +
                                         dentry->d_name.len * 2);
        ret = btrfs_update_inode(trans, parent_root, parent_inode);
        if (ret)
                goto abort_trans_dput;

        /*
         * pull in the delayed directory update and the delayed inode
         * item, otherwise we corrupt the FS during snapshot
         */
        ret = btrfs_run_delayed_items(trans, root);
        if (ret) { /* Transaction aborted */
                dput(parent);
                goto fail;
        }

        record_root_in_trans(trans, root);
        btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
        btrfs_check_and_init_root_item(new_root_item);

        root_flags = btrfs_root_flags(new_root_item);
        if (pending->readonly)
                root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
        else
                root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
        btrfs_set_root_flags(new_root_item, root_flags);

        old = btrfs_lock_root_node(root);
        ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
        if (ret) {
                btrfs_tree_unlock(old);
                free_extent_buffer(old);
                goto abort_trans_dput;
        }

        btrfs_set_lock_blocking(old);

        ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
        /* clean up in any case */
        btrfs_tree_unlock(old);
        free_extent_buffer(old);
        if (ret)
                goto abort_trans_dput;

        /* see comments in should_cow_block() */
        root->force_cow = 1;
        smp_wmb();

        btrfs_set_root_node(new_root_item, tmp);
        /* record when the snapshot was created in key.offset */
        key.offset = trans->transid;
        ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        if (ret)
                goto abort_trans_dput;

        /*
         * insert root back/forward references
         */
        ret = btrfs_add_root_ref(trans, tree_root, objectid,
                                 parent_root->root_key.objectid,
                                 btrfs_ino(parent_inode), index,
                                 dentry->d_name.name, dentry->d_name.len);
        dput(parent);
        if (ret)
                goto fail;

        key.offset = (u64)-1;
        pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
        if (IS_ERR(pending->snap)) {
                ret = PTR_ERR(pending->snap);
                goto abort_trans;
        }

        ret = btrfs_reloc_post_snapshot(trans, pending);
        if (ret)
                goto abort_trans;
        ret = 0;
fail:
        kfree(new_root_item);
        trans->block_rsv = rsv;
        btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
        return ret;

abort_trans_dput:
        dput(parent);
abort_trans:
        btrfs_abort_transaction(trans, root, ret);
        goto fail;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending;
        struct list_head *head = &trans->transaction->pending_snapshots;

        list_for_each_entry(pending, head, list)
                create_pending_snapshot(trans, fs_info, pending);
        return 0;
}

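/*
 * copy the freshly committed chunk and tree root pointers into the
 * in-memory super block so they end up in the next super block write
 */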
static void update_super_roots(struct btrfs_root *root)
{
        struct btrfs_root_item *root_item;
        struct btrfs_super_block *super;

        super = root->fs_info->super_copy;

        root_item = &root->fs_info->chunk_root->root_item;
        super->chunk_root = root_item->bytenr;
        super->chunk_root_generation = root_item->generation;
        super->chunk_root_level = root_item->level;

        root_item = &root->fs_info->tree_root->root_item;
        super->root = root_item->bytenr;
        super->generation = root_item->generation;
        super->root_level = root_item->level;
        if (btrfs_test_opt(root, SPACE_CACHE))
                super->cache_generation = root_item->generation;
}

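/*
 * the two helpers below let outside code peek at the state of the
 * running transaction without holding a reference to it
 */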
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
        int ret = 0;

        spin_lock(&info->trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->in_commit;
        spin_unlock(&info->trans_lock);
        return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
        int ret = 0;

        spin_lock(&info->trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->blocked;
        spin_unlock(&info->trans_lock);
        return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
                                            struct btrfs_transaction *trans)
{
        wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
                                         struct btrfs_transaction *trans)
{
        wait_event(root->fs_info->transaction_wait,
                   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
        struct btrfs_trans_handle *newtrans;
        struct btrfs_root *root;
        struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
        struct btrfs_async_commit *ac =
                container_of(work, struct btrfs_async_commit, work.work);

        btrfs_commit_transaction(ac->newtrans, ac->root);
        kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   int wait_for_unblock)
{
        struct btrfs_async_commit *ac;
        struct btrfs_transaction *cur_trans;

        ac = kmalloc(sizeof(*ac), GFP_NOFS);
        if (!ac)
                return -ENOMEM;

        INIT_DELAYED_WORK(&ac->work, do_async_commit);
        ac->root = root;
        ac->newtrans = btrfs_join_transaction(root);
        if (IS_ERR(ac->newtrans)) {
                int err = PTR_ERR(ac->newtrans);
                kfree(ac);
                return err;
        }

        /* take transaction reference */
        cur_trans = trans->transaction;
        atomic_inc(&cur_trans->use_count);

        btrfs_end_transaction(trans, root);
        schedule_delayed_work(&ac->work, 0);

        /* wait for transaction to start and unblock */
        if (wait_for_unblock)
                wait_current_trans_commit_start_and_unblock(root, cur_trans);
        else
                wait_current_trans_commit_start(root, cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        put_transaction(cur_trans);
        return 0;
}

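/*
 * tear down a transaction whose commit failed: abort it, unhook it from
 * the fs_info lists and drop both remaining references (the handle's
 * and the transaction list's)
 */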
static void cleanup_transaction(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root, int err)
{
        struct btrfs_transaction *cur_trans = trans->transaction;

        WARN_ON(trans->use_count > 1);

        btrfs_abort_transaction(trans, root, err);

        spin_lock(&root->fs_info->trans_lock);
        list_del_init(&cur_trans->list);
        if (cur_trans == root->fs_info->running_transaction) {
                root->fs_info->running_transaction = NULL;
                root->fs_info->trans_no_join = 0;
        }
        spin_unlock(&root->fs_info->trans_lock);

        btrfs_cleanup_one_transaction(trans->transaction, root);

        put_transaction(cur_trans);
        put_transaction(cur_trans);

        trace_btrfs_transaction_commit(root);

        btrfs_scrub_continue(root);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        kmem_cache_free(btrfs_trans_handle_cachep, trans);
}

/*
 * btrfs_transaction state sequence:
 *    in_commit = 0, blocked = 0  (initial)
 *    in_commit = 1, blocked = 1
 *    blocked = 0
 *    commit_done = 1
 */
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root)
{
        unsigned long joined = 0;
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_transaction *prev_trans = NULL;
        DEFINE_WAIT(wait);
        int ret = -EIO;
        int should_grow = 0;
        unsigned long now = get_seconds();
        int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

        btrfs_run_ordered_operations(root, 0);

        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;

        if (cur_trans->aborted)
                goto cleanup_transaction;

        /* make a pass through all the delayed refs we have so far;
         * any running procs may add more while we are here
         */
        ret = btrfs_run_delayed_refs(trans, root, 0);
        if (ret)
                goto cleanup_transaction;

        cur_trans = trans->transaction;

        /*
         * set the flushing flag so procs in this transaction have to
         * start sending their work down.
         */
        cur_trans->delayed_refs.flushing = 1;

        ret = btrfs_run_delayed_refs(trans, root, 0);
        if (ret)
                goto cleanup_transaction;

        spin_lock(&cur_trans->commit_lock);
        if (cur_trans->in_commit) {
                spin_unlock(&cur_trans->commit_lock);
                atomic_inc(&cur_trans->use_count);
                ret = btrfs_end_transaction(trans, root);

                wait_for_commit(root, cur_trans);

                put_transaction(cur_trans);

                return ret;
        }

        trans->transaction->in_commit = 1;
        trans->transaction->blocked = 1;
        spin_unlock(&cur_trans->commit_lock);
        wake_up(&root->fs_info->transaction_blocked_wait);

        spin_lock(&root->fs_info->trans_lock);
        if (cur_trans->list.prev != &root->fs_info->trans_list) {
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (!prev_trans->commit_done) {
                        atomic_inc(&prev_trans->use_count);
                        spin_unlock(&root->fs_info->trans_lock);

                        wait_for_commit(root, prev_trans);

                        put_transaction(prev_trans);
                } else {
                        spin_unlock(&root->fs_info->trans_lock);
                }
        } else {
                spin_unlock(&root->fs_info->trans_lock);
        }

        if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
                should_grow = 1;

        do {
                int snap_pending = 0;

                joined = cur_trans->num_joined;
                if (!list_empty(&trans->transaction->pending_snapshots))
                        snap_pending = 1;

                WARN_ON(cur_trans != trans->transaction);

                if (flush_on_commit || snap_pending) {
                        btrfs_start_delalloc_inodes(root, 1);
                        btrfs_wait_ordered_extents(root, 0, 1);
                }

                ret = btrfs_run_delayed_items(trans, root);
                if (ret)
                        goto cleanup_transaction;

                /*
                 * rename doesn't use btrfs_join_transaction, so once we
                 * set the transaction to blocked above, we aren't going
                 * to get any new ordered operations.  We can safely run
                 * it here and know for sure that nothing new will be
                 * added to the list
                 */
                btrfs_run_ordered_operations(root, 1);

                prepare_to_wait(&cur_trans->writer_wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                if (atomic_read(&cur_trans->num_writers) > 1)
                        schedule_timeout(MAX_SCHEDULE_TIMEOUT);
                else if (should_grow)
                        schedule_timeout(1);

                finish_wait(&cur_trans->writer_wait, &wait);
        } while (atomic_read(&cur_trans->num_writers) > 1 ||
                 (should_grow && cur_trans->num_joined != joined));

        /*
         * Ok now we need to make sure to block out any other joins while we
         * commit the transaction.  We could have started a join before setting
         * no_join so make sure to wait for num_writers to == 1 again.
         */
        spin_lock(&root->fs_info->trans_lock);
        root->fs_info->trans_no_join = 1;
        spin_unlock(&root->fs_info->trans_lock);
        wait_event(cur_trans->writer_wait,
                   atomic_read(&cur_trans->num_writers) == 1);

        /*
         * the reloc mutex makes sure that we stop
         * the balancing code from coming in and moving
         * extents around in the middle of the commit
         */
        mutex_lock(&root->fs_info->reloc_mutex);

        ret = btrfs_run_delayed_items(trans, root);
        if (ret) {
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto cleanup_transaction;
        }

        ret = create_pending_snapshots(trans, root->fs_info);
        if (ret) {
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto cleanup_transaction;
        }

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret) {
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto cleanup_transaction;
        }

        /*
         * make sure none of the code above managed to slip in a
         * delayed item
         */
        btrfs_assert_delayed_root_empty(root);

        WARN_ON(cur_trans != trans->transaction);

        btrfs_scrub_pause(root);
        /* commit_cowonly_roots is responsible for getting the
         * various roots consistent with each other.  Every pointer
         * in the tree of tree roots has to point to the most up to date
         * root for every subvolume and other tree.  So, we have to keep
         * the tree logging code from jumping in and changing any
         * of the trees.
         *
         * At this point in the commit, there can't be any tree-log
         * writers, but a little lower down we drop the trans mutex
         * and let new people in.  By holding the tree_log_mutex
         * from now until after the super is written, we avoid races
         * with the tree-log code.
         */
        mutex_lock(&root->fs_info->tree_log_mutex);

        ret = commit_fs_roots(trans, root);
        if (ret) {
                mutex_unlock(&root->fs_info->tree_log_mutex);
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto cleanup_transaction;
        }

        /* commit_fs_roots gets rid of all the tree log roots, it is now
         * safe to free the root of tree log roots
         */
        btrfs_free_log_root_tree(trans, root->fs_info);

        ret = commit_cowonly_roots(trans, root);
        if (ret) {
                mutex_unlock(&root->fs_info->tree_log_mutex);
                mutex_unlock(&root->fs_info->reloc_mutex);
                goto cleanup_transaction;
        }

        btrfs_prepare_extent_commit(trans, root);

        cur_trans = root->fs_info->running_transaction;

        btrfs_set_root_node(&root->fs_info->tree_root->root_item,
                            root->fs_info->tree_root->node);
        switch_commit_root(root->fs_info->tree_root);

        btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
                            root->fs_info->chunk_root->node);
        switch_commit_root(root->fs_info->chunk_root);

        update_super_roots(root);

        if (!root->fs_info->log_root_recovering) {
                btrfs_set_super_log_root(root->fs_info->super_copy, 0);
                btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
        }

        memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
               sizeof(*root->fs_info->super_copy));

        trans->transaction->blocked = 0;
        spin_lock(&root->fs_info->trans_lock);
        root->fs_info->running_transaction = NULL;
        root->fs_info->trans_no_join = 0;
        spin_unlock(&root->fs_info->trans_lock);
        mutex_unlock(&root->fs_info->reloc_mutex);

        wake_up(&root->fs_info->transaction_wait);

        ret = btrfs_write_and_wait_transaction(trans, root);
        if (ret) {
                btrfs_error(root->fs_info, ret,
                            "Error while writing out transaction.");
                mutex_unlock(&root->fs_info->tree_log_mutex);
                goto cleanup_transaction;
        }

        ret = write_ctree_super(trans, root, 0);
        if (ret) {
                mutex_unlock(&root->fs_info->tree_log_mutex);
                goto cleanup_transaction;
        }

        /*
         * the super is written, we can safely allow the tree-loggers
         * to go about their business
         */
        mutex_unlock(&root->fs_info->tree_log_mutex);

        btrfs_finish_extent_commit(trans, root);

        cur_trans->commit_done = 1;

        root->fs_info->last_trans_committed = cur_trans->transid;

        wake_up(&cur_trans->commit_wait);

        spin_lock(&root->fs_info->trans_lock);
        list_del_init(&cur_trans->list);
        spin_unlock(&root->fs_info->trans_lock);

        put_transaction(cur_trans);
        put_transaction(cur_trans);

        trace_btrfs_transaction_commit(root);

        btrfs_scrub_continue(root);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        if (current != root->fs_info->transaction_kthread)
                btrfs_run_delayed_iputs(root);

        return ret;

cleanup_transaction:
        btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
//      WARN_ON(1);
        if (current->journal_info == trans)
                current->journal_info = NULL;
        cleanup_transaction(trans, root, ret);

        return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
        LIST_HEAD(list);
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
        list_splice_init(&fs_info->dead_roots, &list);
        spin_unlock(&fs_info->trans_lock);

        while (!list_empty(&list)) {
                int ret;

                root = list_entry(list.next, struct btrfs_root, root_list);
                list_del(&root->root_list);

                btrfs_kill_all_delayed_nodes(root);

                if (btrfs_header_backref_rev(root->node) <
                    BTRFS_MIXED_BACKREF_REV)
                        ret = btrfs_drop_snapshot(root, NULL, 0, 0);
                else
                        ret = btrfs_drop_snapshot(root, NULL, 1, 0);
                BUG_ON(ret < 0);
        }
        return 0;
}