/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try to allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
        CHUNK_ALLOC_NO_FORCE = 0,
        CHUNK_ALLOC_LIMITED = 1,
        CHUNK_ALLOC_FORCE = 2,
};

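/*
 * Hedged usage sketch (added; not in the original file): callers that
 * merely want a new chunk when space is tight pass CHUNK_ALLOC_NO_FORCE,
 * e.g.
 *
 *     ret = do_chunk_alloc(trans, extent_root, flags,
 *                          CHUNK_ALLOC_NO_FORCE);
 *
 * while paths that must have a new chunk use CHUNK_ALLOC_FORCE.
 */
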
/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
        RESERVE_FREE = 0,
        RESERVE_ALLOC = 1,
        RESERVE_ALLOC_NO_ACCOUNT = 2,
};

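/*
 * Hedged sketch (added; not in the original file): a reservation taken
 * with
 *
 *     btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC, delalloc);
 *
 * is typically balanced later by
 *
 *     btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE, delalloc);
 *
 * once the extent is recorded or the allocation is abandoned.
 */
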
static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root, u64 bytenr,
                              u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_delayed_ref_node *node, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins,
                                     int no_quota);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 flags,
                          int force);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                       u64 num_bytes, int reserve,
                                       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED ||
                cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);

        if (info->first_logical_byte > block_group->key.objectid)
                info->first_logical_byte = block_group->key.objectid;

        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret) {
                btrfs_get_block_group(ret);
                if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
                        info->first_logical_byte = ret->key.objectid;
        }
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                if (ret)
                        return ret;
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                if (ret)
                        return ret;

                while (nr--) {
                        u64 start, len;

                        if (logical[nr] > cache->key.objectid +
                            cache->key.offset)
                                continue;

                        if (logical[nr] + stripe_len <= cache->key.objectid)
                                continue;

                        start = logical[nr];
                        if (start < cache->key.objectid) {
                                start = cache->key.objectid;
                                len = (logical[nr] + stripe_len) - start;
                        } else {
                                len = min_t(u64, stripe_len,
                                            cache->key.objectid +
                                            cache->key.offset - start);
                        }

                        cache->bytes_super += len;
                        ret = add_excluded_extent(root, start, len);
                        if (ret) {
                                kfree(logical);
                                return ret;
                        }
                }

                kfree(logical);
        }
        return 0;
}

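/*
 * Note (added): btrfs_sb_offset() yields the fixed per-device offsets of
 * the superblock copies (the primary at 64K, mirrors at 64M and 256G);
 * btrfs_rmap_block() maps those back to logical addresses so the loop
 * above can exclude any stripes that land inside this block group.
 */
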
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (!cache->caching_ctl) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check pinned_extents for any extents that can't be
 * used yet, because their free space will not be released until the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE,
                                            NULL);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret); /* -ENOMEM or logic error */
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret); /* -ENOMEM or logic error */
        }

        return total_added;
}

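/*
 * Worked example (added): suppose we cache [0, 100) and pinned_extents
 * marks bytes 20-29 and 50-59.  The loop above adds the gaps 0-19 and
 * 30-49 as free space, and the tail 60-99 is added by the final block,
 * so the pinned bytes stay unusable until their transaction commits.
 */
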
static noinline void caching_thread(struct btrfs_work *work)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_root *extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = -ENOMEM;

        caching_ctl = container_of(work, struct btrfs_caching_control, work);
        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        extent_root = fs_info->extent_root;

        path = btrfs_alloc_path();
        if (!path)
                goto out;

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->commit_root_sem);

next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                if (btrfs_fs_closing(fs_info) > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        if (need_resched() ||
                            rwsem_is_contended(&fs_info->commit_root_sem)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
                                up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
                        }

                        ret = btrfs_next_leaf(extent_root, path);
                        if (ret < 0)
                                goto err;
                        if (ret)
                                break;
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        continue;
                }

                if (key.objectid < last) {
                        key.objectid = last;
                        key.offset = 0;
                        key.type = BTRFS_EXTENT_ITEM_KEY;

                        caching_ctl->progress = last;
                        btrfs_release_path(path);
                        goto next;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                last = key.objectid +
                                        fs_info->tree_root->nodesize;
                        else
                                last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->commit_root_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
out:
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->caching_ctl = NULL;
                block_group->cached = BTRFS_CACHE_ERROR;
                spin_unlock(&block_group->lock);
        }
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
                             int load_cache_only)
{
        DEFINE_WAIT(wait);
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        int ret = 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
        if (!caching_ctl)
                return -ENOMEM;

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        atomic_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                        caching_thread, NULL, NULL);

        spin_lock(&cache->lock);
        /*
         * This should be a rare occasion, but this could happen I think in the
         * case where one thread starts to load the space cache info, and then
         * some other thread starts a transaction commit which tries to do an
         * allocation while the other thread is still loading the space cache
         * info.  The previous loop should have kept us from choosing this block
         * group, but if we've moved to the state where we will wait on caching
         * block groups we need to first check if we're doing a fast load here,
         * so we can wait for it to finish, otherwise we could end up allocating
         * from a block group whose cache gets evicted for one reason or
         * another.
         */
        while (cache->cached == BTRFS_CACHE_FAST) {
                struct btrfs_caching_control *ctl;

                ctl = cache->caching_ctl;
                atomic_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);

                schedule();

                finish_wait(&ctl->wait, &wait);
                put_caching_control(ctl);
                spin_lock(&cache->lock);
        }

        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        WARN_ON(cache->caching_ctl);
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_FAST;
        spin_unlock(&cache->lock);

        if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
                mutex_lock(&caching_ctl->mutex);
                ret = load_free_space_cache(fs_info, cache);

                spin_lock(&cache->lock);
                if (ret == 1) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        cache->last_byte_to_unpin = (u64)-1;
                        caching_ctl->progress = (u64)-1;
                } else {
                        if (load_cache_only) {
                                cache->caching_ctl = NULL;
                                cache->cached = BTRFS_CACHE_NO;
                        } else {
                                cache->cached = BTRFS_CACHE_STARTED;
                                cache->has_caching_ctl = 1;
                        }
                }
                spin_unlock(&cache->lock);
                mutex_unlock(&caching_ctl->mutex);

                wake_up(&caching_ctl->wait);
                if (ret == 1) {
                        put_caching_control(caching_ctl);
                        free_excluded_extents(fs_info->extent_root, cache);
                        return 0;
                }
        } else {
                /*
                 * We are not going to do the fast caching, set cached to the
                 * appropriate value and wakeup any waiters.
                 */
                spin_lock(&cache->lock);
                if (load_cache_only) {
                        cache->caching_ctl = NULL;
                        cache->cached = BTRFS_CACHE_NO;
                } else {
                        cache->cached = BTRFS_CACHE_STARTED;
                        cache->has_caching_ctl = 1;
                }
                spin_unlock(&cache->lock);
                wake_up(&caching_ctl->wait);
        }

        if (load_cache_only) {
                put_caching_control(caching_ctl);
                return 0;
        }

        down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);

        btrfs_get_block_group(cache);

        btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

        return ret;
}

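/*
 * Summary (added): a block group's cached field moves through
 * BTRFS_CACHE_NO -> BTRFS_CACHE_FAST (while the free space cache is
 * loaded) and then either straight to BTRFS_CACHE_FINISHED on a cache
 * hit, or to BTRFS_CACHE_STARTED while caching_thread() scans the
 * extent tree, ending in BTRFS_CACHE_FINISHED or BTRFS_CACHE_ERROR.
 */
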
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

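/*
 * Note (added): like btrfs_search_slot(), btrfs_lookup_data_extent()
 * returns 0 when the exact (start, EXTENT_ITEM, len) key exists, a
 * positive value when it does not, and a negative errno on error.
 */
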
/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * The head node of a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head node
 * may also store the extent flags to set.  This way you can check to see
 * what the reference count and extent flags would be if all of the
 * delayed refs were processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
                offset = root->nodesize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

        ret = btrfs_search_slot(trans, root->fs_info->extent_root,
                                &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == root->nodesize)
                                ret = 0;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_item_v0 *ei0;
                        BUG_ON(item_size != sizeof(*ei0));
                        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_item_v0);
                        num_refs = btrfs_extent_refs_v0(leaf, ei0);
                        /* FIXME: this isn't correct for data */
                        extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
                        BUG();
#endif
                }
                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(trans, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->node.ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

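/*
 * Hedged usage sketch (added; not from the original file): a caller that
 * only needs the flags of a metadata block, with skinny metadata enabled,
 * could do
 *
 *     u64 flags;
 *     ret = btrfs_lookup_extent_info(trans, root, eb->start,
 *                                    btrfs_header_level(eb), 1,
 *                                    NULL, &flags);
 *
 * passing NULL for the reference count it does not need.
 */
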
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually full back refs are generic, and can
 * be used in all cases where implicit back refs are used. The major
 * shortcoming of full back refs is their overhead. Every time a tree block
 * gets COWed, we have to update the back refs entry for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent.
 * The key type is used to differentiate between types of back refs.
 * The meaning of the key offset depends on the type of back ref.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used
 * and the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required. This information is stored in
 * the tree block info structure.
 */

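/*
 * Illustrative example (added; the numeric values are hypothetical): a
 * data extent at bytenr 136708096 that is referenced by inode 257 at
 * file offset 0 in subvolume 5 carries the implicit back ref key
 *
 *     (136708096, BTRFS_EXTENT_DATA_REF_KEY,
 *      hash_extent_data_ref(5, 257, 0))
 *
 * whereas a full (shared) back ref to the same extent from a parent
 * leaf at bytenr 30572544 would be keyed as
 *
 *     (136708096, BTRFS_SHARED_DATA_REF_KEY, 30572544)
 */
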
978 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
979 static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
980                                   struct btrfs_root *root,
981                                   struct btrfs_path *path,
982                                   u64 owner, u32 extra_size)
983 {
984         struct btrfs_extent_item *item;
985         struct btrfs_extent_item_v0 *ei0;
986         struct btrfs_extent_ref_v0 *ref0;
987         struct btrfs_tree_block_info *bi;
988         struct extent_buffer *leaf;
989         struct btrfs_key key;
990         struct btrfs_key found_key;
991         u32 new_size = sizeof(*item);
992         u64 refs;
993         int ret;
994
995         leaf = path->nodes[0];
996         BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
997
998         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
999         ei0 = btrfs_item_ptr(leaf, path->slots[0],
1000                              struct btrfs_extent_item_v0);
1001         refs = btrfs_extent_refs_v0(leaf, ei0);
1002
1003         if (owner == (u64)-1) {
1004                 while (1) {
1005                         if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1006                                 ret = btrfs_next_leaf(root, path);
1007                                 if (ret < 0)
1008                                         return ret;
1009                                 BUG_ON(ret > 0); /* Corruption */
1010                                 leaf = path->nodes[0];
1011                         }
1012                         btrfs_item_key_to_cpu(leaf, &found_key,
1013                                               path->slots[0]);
1014                         BUG_ON(key.objectid != found_key.objectid);
1015                         if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
1016                                 path->slots[0]++;
1017                                 continue;
1018                         }
1019                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1020                                               struct btrfs_extent_ref_v0);
1021                         owner = btrfs_ref_objectid_v0(leaf, ref0);
1022                         break;
1023                 }
1024         }
1025         btrfs_release_path(path);
1026
1027         if (owner < BTRFS_FIRST_FREE_OBJECTID)
1028                 new_size += sizeof(*bi);
1029
1030         new_size -= sizeof(*ei0);
1031         ret = btrfs_search_slot(trans, root, &key, path,
1032                                 new_size + extra_size, 1);
1033         if (ret < 0)
1034                 return ret;
1035         BUG_ON(ret); /* Corruption */
1036
1037         btrfs_extend_item(root, path, new_size);
1038
1039         leaf = path->nodes[0];
1040         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1041         btrfs_set_extent_refs(leaf, item, refs);
1042         /* FIXME: get real generation */
1043         btrfs_set_extent_generation(leaf, item, 0);
1044         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1045                 btrfs_set_extent_flags(leaf, item,
1046                                        BTRFS_EXTENT_FLAG_TREE_BLOCK |
1047                                        BTRFS_BLOCK_FLAG_FULL_BACKREF);
1048                 bi = (struct btrfs_tree_block_info *)(item + 1);
1049                 /* FIXME: get first key of the block */
1050                 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
1051                 btrfs_set_tree_block_level(leaf, bi, (int)owner);
1052         } else {
1053                 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
1054         }
1055         btrfs_mark_buffer_dirty(leaf);
1056         return 0;
1057 }
1058 #endif
1059
1060 static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
1061 {
1062         u32 high_crc = ~(u32)0;
1063         u32 low_crc = ~(u32)0;
1064         __le64 lenum;
1065
1066         lenum = cpu_to_le64(root_objectid);
1067         high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
1068         lenum = cpu_to_le64(owner);
1069         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1070         lenum = cpu_to_le64(offset);
1071         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
1072
1073         return ((u64)high_crc << 31) ^ (u64)low_crc;
1074 }
1075
1076 static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
1077                                      struct btrfs_extent_data_ref *ref)
1078 {
1079         return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
1080                                     btrfs_extent_data_ref_objectid(leaf, ref),
1081                                     btrfs_extent_data_ref_offset(leaf, ref));
1082 }
1083
1084 static int match_extent_data_ref(struct extent_buffer *leaf,
1085                                  struct btrfs_extent_data_ref *ref,
1086                                  u64 root_objectid, u64 owner, u64 offset)
1087 {
1088         if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
1089             btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
1090             btrfs_extent_data_ref_offset(leaf, ref) != offset)
1091                 return 0;
1092         return 1;
1093 }
1094
1095 static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
1096                                            struct btrfs_root *root,
1097                                            struct btrfs_path *path,
1098                                            u64 bytenr, u64 parent,
1099                                            u64 root_objectid,
1100                                            u64 owner, u64 offset)
1101 {
1102         struct btrfs_key key;
1103         struct btrfs_extent_data_ref *ref;
1104         struct extent_buffer *leaf;
1105         u32 nritems;
1106         int ret;
1107         int recow;
1108         int err = -ENOENT;
1109
1110         key.objectid = bytenr;
1111         if (parent) {
1112                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1113                 key.offset = parent;
1114         } else {
1115                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1116                 key.offset = hash_extent_data_ref(root_objectid,
1117                                                   owner, offset);
1118         }
1119 again:
1120         recow = 0;
1121         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1122         if (ret < 0) {
1123                 err = ret;
1124                 goto fail;
1125         }
1126
1127         if (parent) {
1128                 if (!ret)
1129                         return 0;
1130 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1131                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1132                 btrfs_release_path(path);
1133                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1134                 if (ret < 0) {
1135                         err = ret;
1136                         goto fail;
1137                 }
1138                 if (!ret)
1139                         return 0;
1140 #endif
1141                 goto fail;
1142         }
1143
1144         leaf = path->nodes[0];
1145         nritems = btrfs_header_nritems(leaf);
1146         while (1) {
1147                 if (path->slots[0] >= nritems) {
1148                         ret = btrfs_next_leaf(root, path);
1149                         if (ret < 0)
1150                                 err = ret;
1151                         if (ret)
1152                                 goto fail;
1153
1154                         leaf = path->nodes[0];
1155                         nritems = btrfs_header_nritems(leaf);
1156                         recow = 1;
1157                 }
1158
1159                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1160                 if (key.objectid != bytenr ||
1161                     key.type != BTRFS_EXTENT_DATA_REF_KEY)
1162                         goto fail;
1163
1164                 ref = btrfs_item_ptr(leaf, path->slots[0],
1165                                      struct btrfs_extent_data_ref);
1166
1167                 if (match_extent_data_ref(leaf, ref, root_objectid,
1168                                           owner, offset)) {
1169                         if (recow) {
1170                                 btrfs_release_path(path);
1171                                 goto again;
1172                         }
1173                         err = 0;
1174                         break;
1175                 }
1176                 path->slots[0]++;
1177         }
1178 fail:
1179         return err;
1180 }
1181
1182 static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
1183                                            struct btrfs_root *root,
1184                                            struct btrfs_path *path,
1185                                            u64 bytenr, u64 parent,
1186                                            u64 root_objectid, u64 owner,
1187                                            u64 offset, int refs_to_add)
1188 {
1189         struct btrfs_key key;
1190         struct extent_buffer *leaf;
1191         u32 size;
1192         u32 num_refs;
1193         int ret;
1194
1195         key.objectid = bytenr;
1196         if (parent) {
1197                 key.type = BTRFS_SHARED_DATA_REF_KEY;
1198                 key.offset = parent;
1199                 size = sizeof(struct btrfs_shared_data_ref);
1200         } else {
1201                 key.type = BTRFS_EXTENT_DATA_REF_KEY;
1202                 key.offset = hash_extent_data_ref(root_objectid,
1203                                                   owner, offset);
1204                 size = sizeof(struct btrfs_extent_data_ref);
1205         }
1206
1207         ret = btrfs_insert_empty_item(trans, root, path, &key, size);
1208         if (ret && ret != -EEXIST)
1209                 goto fail;
1210
1211         leaf = path->nodes[0];
1212         if (parent) {
1213                 struct btrfs_shared_data_ref *ref;
1214                 ref = btrfs_item_ptr(leaf, path->slots[0],
1215                                      struct btrfs_shared_data_ref);
1216                 if (ret == 0) {
1217                         btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
1218                 } else {
1219                         num_refs = btrfs_shared_data_ref_count(leaf, ref);
1220                         num_refs += refs_to_add;
1221                         btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
1222                 }
1223         } else {
1224                 struct btrfs_extent_data_ref *ref;
1225                 while (ret == -EEXIST) {
1226                         ref = btrfs_item_ptr(leaf, path->slots[0],
1227                                              struct btrfs_extent_data_ref);
1228                         if (match_extent_data_ref(leaf, ref, root_objectid,
1229                                                   owner, offset))
1230                                 break;
1231                         btrfs_release_path(path);
1232                         key.offset++;
1233                         ret = btrfs_insert_empty_item(trans, root, path, &key,
1234                                                       size);
1235                         if (ret && ret != -EEXIST)
1236                                 goto fail;
1237
1238                         leaf = path->nodes[0];
1239                 }
1240                 ref = btrfs_item_ptr(leaf, path->slots[0],
1241                                      struct btrfs_extent_data_ref);
1242                 if (ret == 0) {
1243                         btrfs_set_extent_data_ref_root(leaf, ref,
1244                                                        root_objectid);
1245                         btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
1246                         btrfs_set_extent_data_ref_offset(leaf, ref, offset);
1247                         btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
1248                 } else {
1249                         num_refs = btrfs_extent_data_ref_count(leaf, ref);
1250                         num_refs += refs_to_add;
1251                         btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
1252                 }
1253         }
1254         btrfs_mark_buffer_dirty(leaf);
1255         ret = 0;
1256 fail:
1257         btrfs_release_path(path);
1258         return ret;
1259 }
1260
1261 static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1262                                            struct btrfs_root *root,
1263                                            struct btrfs_path *path,
1264                                            int refs_to_drop, int *last_ref)
1265 {
1266         struct btrfs_key key;
1267         struct btrfs_extent_data_ref *ref1 = NULL;
1268         struct btrfs_shared_data_ref *ref2 = NULL;
1269         struct extent_buffer *leaf;
1270         u32 num_refs = 0;
1271         int ret = 0;
1272
1273         leaf = path->nodes[0];
1274         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1275
1276         if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1277                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1278                                       struct btrfs_extent_data_ref);
1279                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1280         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1281                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1282                                       struct btrfs_shared_data_ref);
1283                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1284 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1285         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1286                 struct btrfs_extent_ref_v0 *ref0;
1287                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1288                                       struct btrfs_extent_ref_v0);
1289                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1290 #endif
1291         } else {
1292                 BUG();
1293         }
1294
1295         BUG_ON(num_refs < refs_to_drop);
1296         num_refs -= refs_to_drop;
1297
1298         if (num_refs == 0) {
1299                 ret = btrfs_del_item(trans, root, path);
1300                 *last_ref = 1;
1301         } else {
1302                 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1303                         btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1304                 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1305                         btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1306 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1307                 else {
1308                         struct btrfs_extent_ref_v0 *ref0;
1309                         ref0 = btrfs_item_ptr(leaf, path->slots[0],
1310                                         struct btrfs_extent_ref_v0);
1311                         btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1312                 }
1313 #endif
1314                 btrfs_mark_buffer_dirty(leaf);
1315         }
1316         return ret;
1317 }
1318
1319 static noinline u32 extent_data_ref_count(struct btrfs_path *path,
1320                                           struct btrfs_extent_inline_ref *iref)
1321 {
1322         struct btrfs_key key;
1323         struct extent_buffer *leaf;
1324         struct btrfs_extent_data_ref *ref1;
1325         struct btrfs_shared_data_ref *ref2;
1326         u32 num_refs = 0;
1327
1328         leaf = path->nodes[0];
1329         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1330         if (iref) {
1331                 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1332                     BTRFS_EXTENT_DATA_REF_KEY) {
1333                         ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1334                         num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1335                 } else {
1336                         ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1337                         num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1338                 }
1339         } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1340                 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1341                                       struct btrfs_extent_data_ref);
1342                 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1343         } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1344                 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1345                                       struct btrfs_shared_data_ref);
1346                 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1347 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1348         } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1349                 struct btrfs_extent_ref_v0 *ref0;
1350                 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1351                                       struct btrfs_extent_ref_v0);
1352                 num_refs = btrfs_ref_count_v0(leaf, ref0);
1353 #endif
1354         } else {
1355                 WARN_ON(1);
1356         }
1357         return num_refs;
1358 }
1359
1360 static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1361                                           struct btrfs_root *root,
1362                                           struct btrfs_path *path,
1363                                           u64 bytenr, u64 parent,
1364                                           u64 root_objectid)
1365 {
1366         struct btrfs_key key;
1367         int ret;
1368
1369         key.objectid = bytenr;
1370         if (parent) {
1371                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1372                 key.offset = parent;
1373         } else {
1374                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1375                 key.offset = root_objectid;
1376         }
1377
1378         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1379         if (ret > 0)
1380                 ret = -ENOENT;
1381 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1382         if (ret == -ENOENT && parent) {
1383                 btrfs_release_path(path);
1384                 key.type = BTRFS_EXTENT_REF_V0_KEY;
1385                 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1386                 if (ret > 0)
1387                         ret = -ENOENT;
1388         }
1389 #endif
1390         return ret;
1391 }
1392
1393 static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1394                                           struct btrfs_root *root,
1395                                           struct btrfs_path *path,
1396                                           u64 bytenr, u64 parent,
1397                                           u64 root_objectid)
1398 {
1399         struct btrfs_key key;
1400         int ret;
1401
1402         key.objectid = bytenr;
1403         if (parent) {
1404                 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1405                 key.offset = parent;
1406         } else {
1407                 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1408                 key.offset = root_objectid;
1409         }
1410
1411         ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1412         btrfs_release_path(path);
1413         return ret;
1414 }
1415
1416 static inline int extent_ref_type(u64 parent, u64 owner)
1417 {
1418         int type;
1419         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1420                 if (parent > 0)
1421                         type = BTRFS_SHARED_BLOCK_REF_KEY;
1422                 else
1423                         type = BTRFS_TREE_BLOCK_REF_KEY;
1424         } else {
1425                 if (parent > 0)
1426                         type = BTRFS_SHARED_DATA_REF_KEY;
1427                 else
1428                         type = BTRFS_EXTENT_DATA_REF_KEY;
1429         }
1430         return type;
1431 }
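/*
 * Worked example (illustrative, not from the original source): in this path
 * tree blocks pass their level (0-7) as the owner while data extents pass an
 * inode number (>= BTRFS_FIRST_FREE_OBJECTID, i.e. 256), so:
 *
 *   extent_ref_type(0,      1)   == BTRFS_TREE_BLOCK_REF_KEY
 *   extent_ref_type(parent, 1)   == BTRFS_SHARED_BLOCK_REF_KEY
 *   extent_ref_type(0,      257) == BTRFS_EXTENT_DATA_REF_KEY
 *   extent_ref_type(parent, 257) == BTRFS_SHARED_DATA_REF_KEY
 *
 * for any non-zero parent block number.
 */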
1432
1433 static int find_next_key(struct btrfs_path *path, int level,
1434                          struct btrfs_key *key)
1436 {
1437         for (; level < BTRFS_MAX_LEVEL; level++) {
1438                 if (!path->nodes[level])
1439                         break;
1440                 if (path->slots[level] + 1 >=
1441                     btrfs_header_nritems(path->nodes[level]))
1442                         continue;
1443                 if (level == 0)
1444                         btrfs_item_key_to_cpu(path->nodes[level], key,
1445                                               path->slots[level] + 1);
1446                 else
1447                         btrfs_node_key_to_cpu(path->nodes[level], key,
1448                                               path->slots[level] + 1);
1449                 return 0;
1450         }
1451         return 1;
1452 }
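/*
 * Example (illustrative): if path->slots[0] points at the last item of a
 * leaf, level 0 has no next slot, so the key is taken from slot + 1 of the
 * level 1 node above it; only when every level up to BTRFS_MAX_LEVEL is
 * exhausted does find_next_key() return 1.
 */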
1453
1454 /*
1455  * Look for an inline back ref.  If the back ref is found, *ref_ret is set
1456  * to the address of the inline back ref, and 0 is returned.
1457  *
1458  * If the back ref isn't found, *ref_ret is set to the address where it
1459  * should be inserted, and -ENOENT is returned.
1460  *
1461  * If insert is true and there are too many inline back refs, the path
1462  * points to the extent item, and -EAGAIN is returned.
1463  *
1464  * NOTE: inline back refs are ordered in the same way that back ref
1465  *       items in the tree are ordered.
1466  */
1467 static noinline_for_stack
1468 int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1469                                  struct btrfs_root *root,
1470                                  struct btrfs_path *path,
1471                                  struct btrfs_extent_inline_ref **ref_ret,
1472                                  u64 bytenr, u64 num_bytes,
1473                                  u64 parent, u64 root_objectid,
1474                                  u64 owner, u64 offset, int insert)
1475 {
1476         struct btrfs_key key;
1477         struct extent_buffer *leaf;
1478         struct btrfs_extent_item *ei;
1479         struct btrfs_extent_inline_ref *iref;
1480         u64 flags;
1481         u64 item_size;
1482         unsigned long ptr;
1483         unsigned long end;
1484         int extra_size;
1485         int type;
1486         int want;
1487         int ret;
1488         int err = 0;
1489         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
1490                                                  SKINNY_METADATA);
1491
1492         key.objectid = bytenr;
1493         key.type = BTRFS_EXTENT_ITEM_KEY;
1494         key.offset = num_bytes;
1495
1496         want = extent_ref_type(parent, owner);
1497         if (insert) {
1498                 extra_size = btrfs_extent_inline_ref_size(want);
1499                 path->keep_locks = 1;
1500         } else
1501                 extra_size = -1;
1502
1503         /*
1504          * For skinny metadata the owner of a tree block is its level, so we
1505          * can use it directly as the offset of the METADATA_ITEM key.
1506          */
1507         if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
1508                 key.type = BTRFS_METADATA_ITEM_KEY;
1509                 key.offset = owner;
1510         }
1511
1512 again:
1513         ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
1514         if (ret < 0) {
1515                 err = ret;
1516                 goto out;
1517         }
1518
1519         /*
1520          * We may be a newly converted file system which still has the old fat
1521          * extent entries for metadata, so try and see if we have one of those.
1522          */
1523         if (ret > 0 && skinny_metadata) {
1524                 skinny_metadata = false;
1525                 if (path->slots[0]) {
1526                         path->slots[0]--;
1527                         btrfs_item_key_to_cpu(path->nodes[0], &key,
1528                                               path->slots[0]);
1529                         if (key.objectid == bytenr &&
1530                             key.type == BTRFS_EXTENT_ITEM_KEY &&
1531                             key.offset == num_bytes)
1532                                 ret = 0;
1533                 }
1534                 if (ret) {
1535                         key.objectid = bytenr;
1536                         key.type = BTRFS_EXTENT_ITEM_KEY;
1537                         key.offset = num_bytes;
1538                         btrfs_release_path(path);
1539                         goto again;
1540                 }
1541         }
1542
1543         if (ret && !insert) {
1544                 err = -ENOENT;
1545                 goto out;
1546         } else if (WARN_ON(ret)) {
1547                 err = -EIO;
1548                 goto out;
1549         }
1550
1551         leaf = path->nodes[0];
1552         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1553 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1554         if (item_size < sizeof(*ei)) {
1555                 if (!insert) {
1556                         err = -ENOENT;
1557                         goto out;
1558                 }
1559                 ret = convert_extent_item_v0(trans, root, path, owner,
1560                                              extra_size);
1561                 if (ret < 0) {
1562                         err = ret;
1563                         goto out;
1564                 }
1565                 leaf = path->nodes[0];
1566                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1567         }
1568 #endif
1569         BUG_ON(item_size < sizeof(*ei));
1570
1571         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1572         flags = btrfs_extent_flags(leaf, ei);
1573
1574         ptr = (unsigned long)(ei + 1);
1575         end = (unsigned long)ei + item_size;
1576
1577         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
1578                 ptr += sizeof(struct btrfs_tree_block_info);
1579                 BUG_ON(ptr > end);
1580         }
1581
1582         err = -ENOENT;
1583         while (1) {
1584                 if (ptr >= end) {
1585                         WARN_ON(ptr > end);
1586                         break;
1587                 }
1588                 iref = (struct btrfs_extent_inline_ref *)ptr;
1589                 type = btrfs_extent_inline_ref_type(leaf, iref);
1590                 if (want < type)
1591                         break;
1592                 if (want > type) {
1593                         ptr += btrfs_extent_inline_ref_size(type);
1594                         continue;
1595                 }
1596
1597                 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1598                         struct btrfs_extent_data_ref *dref;
1599                         dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1600                         if (match_extent_data_ref(leaf, dref, root_objectid,
1601                                                   owner, offset)) {
1602                                 err = 0;
1603                                 break;
1604                         }
1605                         if (hash_extent_data_ref_item(leaf, dref) <
1606                             hash_extent_data_ref(root_objectid, owner, offset))
1607                                 break;
1608                 } else {
1609                         u64 ref_offset;
1610                         ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1611                         if (parent > 0) {
1612                                 if (parent == ref_offset) {
1613                                         err = 0;
1614                                         break;
1615                                 }
1616                                 if (ref_offset < parent)
1617                                         break;
1618                         } else {
1619                                 if (root_objectid == ref_offset) {
1620                                         err = 0;
1621                                         break;
1622                                 }
1623                                 if (ref_offset < root_objectid)
1624                                         break;
1625                         }
1626                 }
1627                 ptr += btrfs_extent_inline_ref_size(type);
1628         }
1629         if (err == -ENOENT && insert) {
1630                 if (item_size + extra_size >=
1631                     BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1632                         err = -EAGAIN;
1633                         goto out;
1634                 }
1635                 /*
1636                  * To add a new inline back ref, we have to make sure
1637                  * there is no corresponding back ref item.
1638                  * For simplicity, we just do not add a new inline back
1639                  * ref if there is any kind of item for this block.
1640                  */
1641                 if (find_next_key(path, 0, &key) == 0 &&
1642                     key.objectid == bytenr &&
1643                     key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1644                         err = -EAGAIN;
1645                         goto out;
1646                 }
1647         }
1648         *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1649 out:
1650         if (insert) {
1651                 path->keep_locks = 0;
1652                 btrfs_unlock_up_safe(path, 1);
1653         }
1654         return err;
1655 }
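/*
 * Caller sketch (editorial illustration, assuming the locals of
 * insert_inline_extent_backref() below, which is the real in-tree consumer
 * of this contract):
 */
#if 0
        ret = lookup_inline_extent_backref(trans, root, path, &iref, bytenr,
                                           num_bytes, parent, root_objectid,
                                           owner, offset, 1);
        if (ret == 0)                   /* found: bump the inline ref count */
                update_inline_extent_backref(root, path, iref, refs_to_add,
                                             extent_op, NULL);
        else if (ret == -ENOENT)        /* not found: iref marks the spot */
                setup_inline_extent_backref(root, path, iref, parent,
                                            root_objectid, owner, offset,
                                            refs_to_add, extent_op);
        else if (ret == -EAGAIN)        /* no room: add a keyed item instead */
                ;
#endif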
1656
1657 /*
1658  * helper to add a new inline back ref
1659  */
1660 static noinline_for_stack
1661 void setup_inline_extent_backref(struct btrfs_root *root,
1662                                  struct btrfs_path *path,
1663                                  struct btrfs_extent_inline_ref *iref,
1664                                  u64 parent, u64 root_objectid,
1665                                  u64 owner, u64 offset, int refs_to_add,
1666                                  struct btrfs_delayed_extent_op *extent_op)
1667 {
1668         struct extent_buffer *leaf;
1669         struct btrfs_extent_item *ei;
1670         unsigned long ptr;
1671         unsigned long end;
1672         unsigned long item_offset;
1673         u64 refs;
1674         int size;
1675         int type;
1676
1677         leaf = path->nodes[0];
1678         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1679         item_offset = (unsigned long)iref - (unsigned long)ei;
1680
1681         type = extent_ref_type(parent, owner);
1682         size = btrfs_extent_inline_ref_size(type);
1683
1684         btrfs_extend_item(root, path, size);
1685
1686         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1687         refs = btrfs_extent_refs(leaf, ei);
1688         refs += refs_to_add;
1689         btrfs_set_extent_refs(leaf, ei, refs);
1690         if (extent_op)
1691                 __run_delayed_extent_op(extent_op, leaf, ei);
1692
1693         ptr = (unsigned long)ei + item_offset;
1694         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1695         if (ptr < end - size)
1696                 memmove_extent_buffer(leaf, ptr + size, ptr,
1697                                       end - size - ptr);
1698
1699         iref = (struct btrfs_extent_inline_ref *)ptr;
1700         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1701         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1702                 struct btrfs_extent_data_ref *dref;
1703                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1704                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1705                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1706                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1707                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1708         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1709                 struct btrfs_shared_data_ref *sref;
1710                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1711                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1712                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1713         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1714                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1715         } else {
1716                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1717         }
1718         btrfs_mark_buffer_dirty(leaf);
1719 }
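/*
 * Layout sketch (illustrative): making room for a new inline ref of 'size'
 * bytes at item_offset within the extent item:
 *
 *   before: [ btrfs_extent_item | ref A | ref B ]
 *   after:  [ btrfs_extent_item | ref A | new ref | ref B ]
 *
 * btrfs_extend_item() grows the item by 'size' and the memmove above shifts
 * every ref that sorts after the new one toward the end of the item.
 */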
1720
1721 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1722                                  struct btrfs_root *root,
1723                                  struct btrfs_path *path,
1724                                  struct btrfs_extent_inline_ref **ref_ret,
1725                                  u64 bytenr, u64 num_bytes, u64 parent,
1726                                  u64 root_objectid, u64 owner, u64 offset)
1727 {
1728         int ret;
1729
1730         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1731                                            bytenr, num_bytes, parent,
1732                                            root_objectid, owner, offset, 0);
1733         if (ret != -ENOENT)
1734                 return ret;
1735
1736         btrfs_release_path(path);
1737         *ref_ret = NULL;
1738
1739         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1740                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1741                                             root_objectid);
1742         } else {
1743                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1744                                              root_objectid, owner, offset);
1745         }
1746         return ret;
1747 }
1748
1749 /*
1750  * helper to update/remove an inline back ref
1751  */
1752 static noinline_for_stack
1753 void update_inline_extent_backref(struct btrfs_root *root,
1754                                   struct btrfs_path *path,
1755                                   struct btrfs_extent_inline_ref *iref,
1756                                   int refs_to_mod,
1757                                   struct btrfs_delayed_extent_op *extent_op,
1758                                   int *last_ref)
1759 {
1760         struct extent_buffer *leaf;
1761         struct btrfs_extent_item *ei;
1762         struct btrfs_extent_data_ref *dref = NULL;
1763         struct btrfs_shared_data_ref *sref = NULL;
1764         unsigned long ptr;
1765         unsigned long end;
1766         u32 item_size;
1767         int size;
1768         int type;
1769         u64 refs;
1770
1771         leaf = path->nodes[0];
1772         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1773         refs = btrfs_extent_refs(leaf, ei);
1774         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1775         refs += refs_to_mod;
1776         btrfs_set_extent_refs(leaf, ei, refs);
1777         if (extent_op)
1778                 __run_delayed_extent_op(extent_op, leaf, ei);
1779
1780         type = btrfs_extent_inline_ref_type(leaf, iref);
1781
1782         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1783                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1784                 refs = btrfs_extent_data_ref_count(leaf, dref);
1785         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1786                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1787                 refs = btrfs_shared_data_ref_count(leaf, sref);
1788         } else {
1789                 refs = 1;
1790                 BUG_ON(refs_to_mod != -1);
1791         }
1792
1793         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1794         refs += refs_to_mod;
1795
1796         if (refs > 0) {
1797                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1798                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1799                 else
1800                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1801         } else {
1802                 *last_ref = 1;
1803                 size = btrfs_extent_inline_ref_size(type);
1804                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1805                 ptr = (unsigned long)iref;
1806                 end = (unsigned long)ei + item_size;
1807                 if (ptr + size < end)
1808                         memmove_extent_buffer(leaf, ptr, ptr + size,
1809                                               end - ptr - size);
1810                 item_size -= size;
1811                 btrfs_truncate_item(root, path, item_size, 1);
1812         }
1813         btrfs_mark_buffer_dirty(leaf);
1814 }
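/*
 * Removal sketch (illustrative), the inverse of the insert case above: when
 * the last count on an inline ref drops, the memmove pulls the tail refs
 * left over the dead ref and btrfs_truncate_item() shrinks the item:
 *
 *   before: [ btrfs_extent_item | ref A | dead ref | ref B ]
 *   after:  [ btrfs_extent_item | ref A | ref B ]
 */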
1815
1816 static noinline_for_stack
1817 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1818                                  struct btrfs_root *root,
1819                                  struct btrfs_path *path,
1820                                  u64 bytenr, u64 num_bytes, u64 parent,
1821                                  u64 root_objectid, u64 owner,
1822                                  u64 offset, int refs_to_add,
1823                                  struct btrfs_delayed_extent_op *extent_op)
1824 {
1825         struct btrfs_extent_inline_ref *iref;
1826         int ret;
1827
1828         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1829                                            bytenr, num_bytes, parent,
1830                                            root_objectid, owner, offset, 1);
1831         if (ret == 0) {
1832                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1833                 update_inline_extent_backref(root, path, iref,
1834                                              refs_to_add, extent_op, NULL);
1835         } else if (ret == -ENOENT) {
1836                 setup_inline_extent_backref(root, path, iref, parent,
1837                                             root_objectid, owner, offset,
1838                                             refs_to_add, extent_op);
1839                 ret = 0;
1840         }
1841         return ret;
1842 }
1843
1844 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1845                                  struct btrfs_root *root,
1846                                  struct btrfs_path *path,
1847                                  u64 bytenr, u64 parent, u64 root_objectid,
1848                                  u64 owner, u64 offset, int refs_to_add)
1849 {
1850         int ret;
1851         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1852                 BUG_ON(refs_to_add != 1);
1853                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1854                                             parent, root_objectid);
1855         } else {
1856                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1857                                              parent, root_objectid,
1858                                              owner, offset, refs_to_add);
1859         }
1860         return ret;
1861 }
1862
1863 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1864                                  struct btrfs_root *root,
1865                                  struct btrfs_path *path,
1866                                  struct btrfs_extent_inline_ref *iref,
1867                                  int refs_to_drop, int is_data, int *last_ref)
1868 {
1869         int ret = 0;
1870
1871         BUG_ON(!is_data && refs_to_drop != 1);
1872         if (iref) {
1873                 update_inline_extent_backref(root, path, iref,
1874                                              -refs_to_drop, NULL, last_ref);
1875         } else if (is_data) {
1876                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1877                                              last_ref);
1878         } else {
1879                 *last_ref = 1;
1880                 ret = btrfs_del_item(trans, root, path);
1881         }
1882         return ret;
1883 }
1884
1885 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
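/*
 * in_range() tests the half-open interval [first, first + len); e.g.
 * in_range(4095, 0, 4096) is true while in_range(4096, 0, 4096) is not.
 */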
1886 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1887                                u64 *discarded_bytes)
1888 {
1889         int j, ret = 0;
1890         u64 bytes_left, end;
1891         u64 aligned_start = ALIGN(start, 1 << 9);
1892
1893         if (WARN_ON(start != aligned_start)) {
1894                 len -= aligned_start - start;
1895                 len = round_down(len, 1 << 9);
1896                 start = aligned_start;
1897         }
1898
1899         *discarded_bytes = 0;
1900
1901         if (!len)
1902                 return 0;
1903
1904         end = start + len;
1905         bytes_left = len;
1906
1907         /* Skip any superblocks on this device. */
1908         for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1909                 u64 sb_start = btrfs_sb_offset(j);
1910                 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1911                 u64 size = sb_start - start;
1912
1913                 if (!in_range(sb_start, start, bytes_left) &&
1914                     !in_range(sb_end, start, bytes_left) &&
1915                     !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1916                         continue;
1917
1918                 /*
1919                  * Superblock spans beginning of range.  Adjust start and
1920                  * try again.
1921                  */
1922                 if (sb_start <= start) {
1923                         start += sb_end - start;
1924                         if (start > end) {
1925                                 bytes_left = 0;
1926                                 break;
1927                         }
1928                         bytes_left = end - start;
1929                         continue;
1930                 }
1931
1932                 if (size) {
1933                         ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
1934                                                    GFP_NOFS, 0);
1935                         if (!ret)
1936                                 *discarded_bytes += size;
1937                         else if (ret != -EOPNOTSUPP)
1938                                 return ret;
1939                 }
1940
1941                 start = sb_end;
1942                 if (start > end) {
1943                         bytes_left = 0;
1944                         break;
1945                 }
1946                 bytes_left = end - start;
1947         }
1948
1949         if (bytes_left) {
1950                 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
1951                                            GFP_NOFS, 0);
1952                 if (!ret)
1953                         *discarded_bytes += bytes_left;
1954         }
1955         return ret;
1956 }
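/*
 * Worked example (illustrative numbers): discarding [60 KiB, 80 KiB) on a
 * device whose first superblock mirror sits at btrfs_sb_offset(0) == 64 KiB
 * with BTRFS_SUPER_INFO_SIZE == 4 KiB issues two discards, [60 KiB, 64 KiB)
 * and [68 KiB, 80 KiB), and reports *discarded_bytes == 16 KiB.
 */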
1957
1958 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1959                          u64 num_bytes, u64 *actual_bytes)
1960 {
1961         int ret;
1962         u64 discarded_bytes = 0;
1963         struct btrfs_bio *bbio = NULL;
1964
1966         /* Tell the block device(s) that the sectors can be discarded */
1967         ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
1968                               bytenr, &num_bytes, &bbio, 0);
1969         /* Error condition is -ENOMEM */
1970         if (!ret) {
1971                 struct btrfs_bio_stripe *stripe = bbio->stripes;
1972                 int i;
1973
1975                 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
1976                         u64 bytes;
1977                         if (!stripe->dev->can_discard)
1978                                 continue;
1979
1980                         ret = btrfs_issue_discard(stripe->dev->bdev,
1981                                                   stripe->physical,
1982                                                   stripe->length,
1983                                                   &bytes);
1984                         if (!ret)
1985                                 discarded_bytes += bytes;
1986                         else if (ret != -EOPNOTSUPP)
1987                         break; /* Logic errors or -ENOMEM, or -EIO, but I don't know how that could happen -- JDM */
1988
1989                         /*
1990                          * In case we get back EOPNOTSUPP for some reason,
1991                          * just ignore the return value so we don't screw up
1992                          * people calling discard_extent.
1993                          */
1994                         ret = 0;
1995                 }
1996                 btrfs_put_bbio(bbio);
1997         }
1998
1999         if (actual_bytes)
2000                 *actual_bytes = discarded_bytes;
2001
2003         if (ret == -EOPNOTSUPP)
2004                 ret = 0;
2005         return ret;
2006 }
2007
2008 /* Can return -ENOMEM */
2009 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2010                          struct btrfs_root *root,
2011                          u64 bytenr, u64 num_bytes, u64 parent,
2012                          u64 root_objectid, u64 owner, u64 offset,
2013                          int no_quota)
2014 {
2015         int ret;
2016         struct btrfs_fs_info *fs_info = root->fs_info;
2017
2018         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2019                root_objectid == BTRFS_TREE_LOG_OBJECTID);
2020
2021         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2022                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2023                                         num_bytes,
2024                                         parent, root_objectid, (int)owner,
2025                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
2026         } else {
2027                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2028                                         num_bytes,
2029                                         parent, root_objectid, owner, offset,
2030                                         BTRFS_ADD_DELAYED_REF, NULL, no_quota);
2031         }
2032         return ret;
2033 }
2034
2035 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2036                                   struct btrfs_root *root,
2037                                   struct btrfs_delayed_ref_node *node,
2038                                   u64 parent, u64 root_objectid,
2039                                   u64 owner, u64 offset, int refs_to_add,
2040                                   struct btrfs_delayed_extent_op *extent_op)
2041 {
2042         struct btrfs_fs_info *fs_info = root->fs_info;
2043         struct btrfs_path *path;
2044         struct extent_buffer *leaf;
2045         struct btrfs_extent_item *item;
2046         struct btrfs_key key;
2047         u64 bytenr = node->bytenr;
2048         u64 num_bytes = node->num_bytes;
2049         u64 refs;
2050         int ret;
2051         int no_quota = node->no_quota;
2052
2053         path = btrfs_alloc_path();
2054         if (!path)
2055                 return -ENOMEM;
2056
2057         if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
2058                 no_quota = 1;
2059
2060         path->reada = 1;
2061         path->leave_spinning = 1;
2062         /* this will set up the path even if it fails to insert the back ref */
2063         ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2064                                            bytenr, num_bytes, parent,
2065                                            root_objectid, owner, offset,
2066                                            refs_to_add, extent_op);
2067         if ((ret < 0 && ret != -EAGAIN) || !ret)
2068                 goto out;
2069
2070         /*
2071          * OK, we had -EAGAIN, which means we didn't have space to insert an
2072          * inline extent ref, so just update the reference count and add a
2073          * normal backref.
2074          */
2075         leaf = path->nodes[0];
2076         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2077         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2078         refs = btrfs_extent_refs(leaf, item);
2079         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2080         if (extent_op)
2081                 __run_delayed_extent_op(extent_op, leaf, item);
2082
2083         btrfs_mark_buffer_dirty(leaf);
2084         btrfs_release_path(path);
2085
2086         path->reada = 1;
2087         path->leave_spinning = 1;
2088         /* now insert the actual backref */
2089         ret = insert_extent_backref(trans, root->fs_info->extent_root,
2090                                     path, bytenr, parent, root_objectid,
2091                                     owner, offset, refs_to_add);
2092         if (ret)
2093                 btrfs_abort_transaction(trans, root, ret);
2094 out:
2095         btrfs_free_path(path);
2096         return ret;
2097 }
2098
2099 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2100                                 struct btrfs_root *root,
2101                                 struct btrfs_delayed_ref_node *node,
2102                                 struct btrfs_delayed_extent_op *extent_op,
2103                                 int insert_reserved)
2104 {
2105         int ret = 0;
2106         struct btrfs_delayed_data_ref *ref;
2107         struct btrfs_key ins;
2108         u64 parent = 0;
2109         u64 ref_root = 0;
2110         u64 flags = 0;
2111
2112         ins.objectid = node->bytenr;
2113         ins.offset = node->num_bytes;
2114         ins.type = BTRFS_EXTENT_ITEM_KEY;
2115
2116         ref = btrfs_delayed_node_to_data_ref(node);
2117         trace_run_delayed_data_ref(node, ref, node->action);
2118
2119         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2120                 parent = ref->parent;
2121         ref_root = ref->root;
2122
2123         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2124                 if (extent_op)
2125                         flags |= extent_op->flags_to_set;
2126                 ret = alloc_reserved_file_extent(trans, root,
2127                                                  parent, ref_root, flags,
2128                                                  ref->objectid, ref->offset,
2129                                                  &ins, node->ref_mod);
2130         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2131                 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2132                                              ref_root, ref->objectid,
2133                                              ref->offset, node->ref_mod,
2134                                              extent_op);
2135         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2136                 ret = __btrfs_free_extent(trans, root, node, parent,
2137                                           ref_root, ref->objectid,
2138                                           ref->offset, node->ref_mod,
2139                                           extent_op);
2140         } else {
2141                 BUG();
2142         }
2143         return ret;
2144 }
2145
2146 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2147                                     struct extent_buffer *leaf,
2148                                     struct btrfs_extent_item *ei)
2149 {
2150         u64 flags = btrfs_extent_flags(leaf, ei);
2151         if (extent_op->update_flags) {
2152                 flags |= extent_op->flags_to_set;
2153                 btrfs_set_extent_flags(leaf, ei, flags);
2154         }
2155
2156         if (extent_op->update_key) {
2157                 struct btrfs_tree_block_info *bi;
2158                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2159                 bi = (struct btrfs_tree_block_info *)(ei + 1);
2160                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2161         }
2162 }
2163
2164 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2165                                  struct btrfs_root *root,
2166                                  struct btrfs_delayed_ref_node *node,
2167                                  struct btrfs_delayed_extent_op *extent_op)
2168 {
2169         struct btrfs_key key;
2170         struct btrfs_path *path;
2171         struct btrfs_extent_item *ei;
2172         struct extent_buffer *leaf;
2173         u32 item_size;
2174         int ret;
2175         int err = 0;
2176         int metadata = !extent_op->is_data;
2177
2178         if (trans->aborted)
2179                 return 0;
2180
2181         if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2182                 metadata = 0;
2183
2184         path = btrfs_alloc_path();
2185         if (!path)
2186                 return -ENOMEM;
2187
2188         key.objectid = node->bytenr;
2189
2190         if (metadata) {
2191                 key.type = BTRFS_METADATA_ITEM_KEY;
2192                 key.offset = extent_op->level;
2193         } else {
2194                 key.type = BTRFS_EXTENT_ITEM_KEY;
2195                 key.offset = node->num_bytes;
2196         }
2197
2198 again:
2199         path->reada = 1;
2200         path->leave_spinning = 1;
2201         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2202                                 path, 0, 1);
2203         if (ret < 0) {
2204                 err = ret;
2205                 goto out;
2206         }
2207         if (ret > 0) {
2208                 if (metadata) {
2209                         if (path->slots[0] > 0) {
2210                                 path->slots[0]--;
2211                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
2212                                                       path->slots[0]);
2213                                 if (key.objectid == node->bytenr &&
2214                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
2215                                     key.offset == node->num_bytes)
2216                                         ret = 0;
2217                         }
2218                         if (ret > 0) {
2219                                 btrfs_release_path(path);
2220                                 metadata = 0;
2221
2222                                 key.objectid = node->bytenr;
2223                                 key.offset = node->num_bytes;
2224                                 key.type = BTRFS_EXTENT_ITEM_KEY;
2225                                 goto again;
2226                         }
2227                 } else {
2228                         err = -EIO;
2229                         goto out;
2230                 }
2231         }
2232
2233         leaf = path->nodes[0];
2234         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2235 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2236         if (item_size < sizeof(*ei)) {
2237                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2238                                              path, (u64)-1, 0);
2239                 if (ret < 0) {
2240                         err = ret;
2241                         goto out;
2242                 }
2243                 leaf = path->nodes[0];
2244                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2245         }
2246 #endif
2247         BUG_ON(item_size < sizeof(*ei));
2248         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2249         __run_delayed_extent_op(extent_op, leaf, ei);
2250
2251         btrfs_mark_buffer_dirty(leaf);
2252 out:
2253         btrfs_free_path(path);
2254         return err;
2255 }
2256
2257 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2258                                 struct btrfs_root *root,
2259                                 struct btrfs_delayed_ref_node *node,
2260                                 struct btrfs_delayed_extent_op *extent_op,
2261                                 int insert_reserved)
2262 {
2263         int ret = 0;
2264         struct btrfs_delayed_tree_ref *ref;
2265         struct btrfs_key ins;
2266         u64 parent = 0;
2267         u64 ref_root = 0;
2268         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2269                                                  SKINNY_METADATA);
2270
2271         ref = btrfs_delayed_node_to_tree_ref(node);
2272         trace_run_delayed_tree_ref(node, ref, node->action);
2273
2274         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2275                 parent = ref->parent;
2276         ref_root = ref->root;
2277
2278         ins.objectid = node->bytenr;
2279         if (skinny_metadata) {
2280                 ins.offset = ref->level;
2281                 ins.type = BTRFS_METADATA_ITEM_KEY;
2282         } else {
2283                 ins.offset = node->num_bytes;
2284                 ins.type = BTRFS_EXTENT_ITEM_KEY;
2285         }
2286
2287         BUG_ON(node->ref_mod != 1);
2288         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2289                 BUG_ON(!extent_op || !extent_op->update_flags);
2290                 ret = alloc_reserved_tree_block(trans, root,
2291                                                 parent, ref_root,
2292                                                 extent_op->flags_to_set,
2293                                                 &extent_op->key,
2294                                                 ref->level, &ins,
2295                                                 node->no_quota);
2296         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2297                 ret = __btrfs_inc_extent_ref(trans, root, node,
2298                                              parent, ref_root,
2299                                              ref->level, 0, 1,
2300                                              extent_op);
2301         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2302                 ret = __btrfs_free_extent(trans, root, node,
2303                                           parent, ref_root,
2304                                           ref->level, 0, 1, extent_op);
2305         } else {
2306                 BUG();
2307         }
2308         return ret;
2309 }
2310
2311 /* helper function to actually process a single delayed ref entry */
2312 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2313                                struct btrfs_root *root,
2314                                struct btrfs_delayed_ref_node *node,
2315                                struct btrfs_delayed_extent_op *extent_op,
2316                                int insert_reserved)
2317 {
2318         int ret = 0;
2319
2320         if (trans->aborted) {
2321                 if (insert_reserved)
2322                         btrfs_pin_extent(root, node->bytenr,
2323                                          node->num_bytes, 1);
2324                 return 0;
2325         }
2326
2327         if (btrfs_delayed_ref_is_head(node)) {
2328                 struct btrfs_delayed_ref_head *head;
2329                 /*
2330                  * We've hit the end of the chain and we were supposed
2331                  * to insert this extent into the tree.  But it got
2332                  * deleted before we ever needed to insert it, so all
2333                  * we have to do is clean up the accounting.
2334                  */
2335                 BUG_ON(extent_op);
2336                 head = btrfs_delayed_node_to_head(node);
2337                 trace_run_delayed_ref_head(node, head, node->action);
2338
2339                 if (insert_reserved) {
2340                         btrfs_pin_extent(root, node->bytenr,
2341                                          node->num_bytes, 1);
2342                         if (head->is_data) {
2343                                 ret = btrfs_del_csums(trans, root,
2344                                                       node->bytenr,
2345                                                       node->num_bytes);
2346                         }
2347                 }
2348                 return ret;
2349         }
2350
2351         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2352             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2353                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2354                                            insert_reserved);
2355         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2356                  node->type == BTRFS_SHARED_DATA_REF_KEY)
2357                 ret = run_delayed_data_ref(trans, root, node, extent_op,
2358                                            insert_reserved);
2359         else
2360                 BUG();
2361         return ret;
2362 }
2363
2364 static inline struct btrfs_delayed_ref_node *
2365 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2366 {
2367         struct btrfs_delayed_ref_node *ref;
2368
2369         if (list_empty(&head->ref_list))
2370                 return NULL;
2371
2372         /*
2373          * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2374          * This is to prevent a ref count from going down to zero, which deletes
2375          * the extent item from the extent tree, while there are still references
2376          * to add; those adds would fail because they would not find the extent item.
2377          */
2378         list_for_each_entry(ref, &head->ref_list, list) {
2379                 if (ref->action == BTRFS_ADD_DELAYED_REF)
2380                         return ref;
2381         }
2382
2383         return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2384                           list);
2385 }
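/*
 * Example (illustrative): with a pending list of [DROP ref_mod=1,
 * ADD ref_mod=1] on one extent, running the ADD first moves the on-disk
 * count 1 -> 2 -> 1; running them in list order would go 1 -> 0, delete
 * the extent item, and make the following ADD fail its lookup.
 */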
2386
2387 /*
2388  * Returns 0 on success or if called with an already aborted transaction.
2389  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2390  */
2391 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2392                                              struct btrfs_root *root,
2393                                              unsigned long nr)
2394 {
2395         struct btrfs_delayed_ref_root *delayed_refs;
2396         struct btrfs_delayed_ref_node *ref;
2397         struct btrfs_delayed_ref_head *locked_ref = NULL;
2398         struct btrfs_delayed_extent_op *extent_op;
2399         struct btrfs_fs_info *fs_info = root->fs_info;
2400         ktime_t start = ktime_get();
2401         int ret;
2402         unsigned long count = 0;
2403         unsigned long actual_count = 0;
2404         int must_insert_reserved = 0;
2405
2406         delayed_refs = &trans->transaction->delayed_refs;
2407         while (1) {
2408                 if (!locked_ref) {
2409                         if (count >= nr)
2410                                 break;
2411
2412                         spin_lock(&delayed_refs->lock);
2413                         locked_ref = btrfs_select_ref_head(trans);
2414                         if (!locked_ref) {
2415                                 spin_unlock(&delayed_refs->lock);
2416                                 break;
2417                         }
2418
2419                         /* grab the lock that says we are going to process
2420                          * all the refs for this head */
2421                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
2422                         spin_unlock(&delayed_refs->lock);
2423                         /*
2424                          * we may have dropped the spin lock to get the head
2425                          * mutex lock, and that might have given someone else
2426                          * time to free the head.  If that's true, it has been
2427                          * removed from our list and we can move on.
2428                          */
2429                         if (ret == -EAGAIN) {
2430                                 locked_ref = NULL;
2431                                 count++;
2432                                 continue;
2433                         }
2434                 }
2435
2436                 spin_lock(&locked_ref->lock);
2437
2438                 /*
2439                  * locked_ref is the head node, so we have to go one
2440                  * node back for any delayed ref updates
2441                  */
2442                 ref = select_delayed_ref(locked_ref);
2443
2444                 if (ref && ref->seq &&
2445                     btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2446                         spin_unlock(&locked_ref->lock);
2447                         btrfs_delayed_ref_unlock(locked_ref);
2448                         spin_lock(&delayed_refs->lock);
2449                         locked_ref->processing = 0;
2450                         delayed_refs->num_heads_ready++;
2451                         spin_unlock(&delayed_refs->lock);
2452                         locked_ref = NULL;
2453                         cond_resched();
2454                         count++;
2455                         continue;
2456                 }
2457
2458                 /*
2459                  * record the must insert reserved flag before we
2460                  * drop the spin lock.
2461                  */
2462                 must_insert_reserved = locked_ref->must_insert_reserved;
2463                 locked_ref->must_insert_reserved = 0;
2464
2465                 extent_op = locked_ref->extent_op;
2466                 locked_ref->extent_op = NULL;
2467
2468                 if (!ref) {
2471                         /* All delayed refs have been processed; go ahead
2472                          * and send the head node to run_one_delayed_ref,
2473                          * so that any accounting fixes can happen.
2474                          */
2475                         ref = &locked_ref->node;
2476
2477                         if (extent_op && must_insert_reserved) {
2478                                 btrfs_free_delayed_extent_op(extent_op);
2479                                 extent_op = NULL;
2480                         }
2481
2482                         if (extent_op) {
2483                                 spin_unlock(&locked_ref->lock);
2484                                 ret = run_delayed_extent_op(trans, root,
2485                                                             ref, extent_op);
2486                                 btrfs_free_delayed_extent_op(extent_op);
2487
2488                                 if (ret) {
2489                                         /*
2490                                          * Need to reset must_insert_reserved if
2491                                          * there was an error so the abort stuff
2492                                          * can clean up the reserved space
2493                                          * properly.
2494                                          */
2495                                         if (must_insert_reserved)
2496                                                 locked_ref->must_insert_reserved = 1;
2497                                         locked_ref->processing = 0;
2498                                         btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2499                                         btrfs_delayed_ref_unlock(locked_ref);
2500                                         return ret;
2501                                 }
2502                                 continue;
2503                         }
2504
2505                         /*
2506                          * Need to drop our head ref lock and re-acquire the
2507                          * delayed ref lock and then re-check to make sure
2508                          * no new refs were added.
2509                          */
2510                         spin_unlock(&locked_ref->lock);
2511                         spin_lock(&delayed_refs->lock);
2512                         spin_lock(&locked_ref->lock);
2513                         if (!list_empty(&locked_ref->ref_list) ||
2514                             locked_ref->extent_op) {
2515                                 spin_unlock(&locked_ref->lock);
2516                                 spin_unlock(&delayed_refs->lock);
2517                                 continue;
2518                         }
2519                         ref->in_tree = 0;
2520                         delayed_refs->num_heads--;
2521                         rb_erase(&locked_ref->href_node,
2522                                  &delayed_refs->href_root);
2523                         spin_unlock(&delayed_refs->lock);
2524                 } else {
2525                         actual_count++;
2526                         ref->in_tree = 0;
2527                         list_del(&ref->list);
2528                 }
2529                 atomic_dec(&delayed_refs->num_entries);
2530
2531                 if (!btrfs_delayed_ref_is_head(ref)) {
2532                         /*
2533                          * when we play the delayed ref, also correct the
2534                          * ref_mod on head
2535                          */
2536                         switch (ref->action) {
2537                         case BTRFS_ADD_DELAYED_REF:
2538                         case BTRFS_ADD_DELAYED_EXTENT:
2539                                 locked_ref->node.ref_mod -= ref->ref_mod;
2540                                 break;
2541                         case BTRFS_DROP_DELAYED_REF:
2542                                 locked_ref->node.ref_mod += ref->ref_mod;
2543                                 break;
2544                         default:
2545                                 WARN_ON(1);
2546                         }
2547                 }
2548                 spin_unlock(&locked_ref->lock);
2549
2550                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2551                                           must_insert_reserved);
2552
2553                 btrfs_free_delayed_extent_op(extent_op);
2554                 if (ret) {
2555                         locked_ref->processing = 0;
2556                         btrfs_delayed_ref_unlock(locked_ref);
2557                         btrfs_put_delayed_ref(ref);
2558                         btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2559                         return ret;
2560                 }
2561
2562                 /*
2563                  * If this node is a head, that means all the refs in this head
2564                  * have been dealt with, and we will pick the next head to deal
2565                  * with, so we must unlock the head and drop it from the cluster
2566                  * list before we release it.
2567                  */
2568                 if (btrfs_delayed_ref_is_head(ref)) {
2569                         if (locked_ref->is_data &&
2570                             locked_ref->total_ref_mod < 0) {
2571                                 spin_lock(&delayed_refs->lock);
2572                                 delayed_refs->pending_csums -= ref->num_bytes;
2573                                 spin_unlock(&delayed_refs->lock);
2574                         }
2575                         btrfs_delayed_ref_unlock(locked_ref);
2576                         locked_ref = NULL;
2577                 }
2578                 btrfs_put_delayed_ref(ref);
2579                 count++;
2580                 cond_resched();
2581         }
2582
2583         /*
2584          * We don't want to include ref heads: we can have empty ref heads,
2585          * and those would drastically skew our runtime down because they
2586          * only involve accounting, not actual extent tree updates.
2587          */
2588         if (actual_count > 0) {
2589                 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2590                 u64 avg;
2591
2592                 /*
2593                  * We weigh the current average higher than our current runtime
2594                  * to avoid large swings in the average.
2595                  */
2596                 spin_lock(&delayed_refs->lock);
2597                 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2598                 fs_info->avg_delayed_ref_runtime = avg >> 2;    /* div by 4 */
2599                 spin_unlock(&delayed_refs->lock);
2600         }
2601         return 0;
2602 }
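/*
 * Worked example for the average above (illustrative numbers): with
 * avg_delayed_ref_runtime == 100000ns and a batch runtime of 500000ns,
 * avg = (3 * 100000 + 500000) >> 2 == 200000ns -- a 3:1 weighted moving
 * average that damps one-off spikes.
 */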
2603
2604 #ifdef SCRAMBLE_DELAYED_REFS
2605 /*
2606  * Normally delayed refs get processed in ascending bytenr order. This
2607  * correlates in most cases to the order added. To expose dependencies on this
2608  * order, we start to process the tree in the middle instead of the beginning.
2609  */
2610 static u64 find_middle(struct rb_root *root)
2611 {
2612         struct rb_node *n = root->rb_node;
2613         struct btrfs_delayed_ref_node *entry;
2614         int alt = 1;
2615         u64 middle;
2616         u64 first = 0, last = 0;
2617
2618         n = rb_first(root);
2619         if (n) {
2620                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2621                 first = entry->bytenr;
2622         }
2623         n = rb_last(root);
2624         if (n) {
2625                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2626                 last = entry->bytenr;
2627         }
2628         n = root->rb_node;
2629
2630         while (n) {
2631                 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2632                 WARN_ON(!entry->in_tree);
2633
2634                 middle = entry->bytenr;
2635
2636                 if (alt)
2637                         n = n->rb_left;
2638                 else
2639                         n = n->rb_right;
2640
2641                 alt = 1 - alt;
2642         }
2643         return middle;
2644 }
2645 #endif
2646
2647 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2648 {
2649         u64 num_bytes;
2650
2651         num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2652                              sizeof(struct btrfs_extent_inline_ref));
2653         if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2654                 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2655
2656         /*
2657          * We don't ever fill up leaves all the way, so the caller doubles
2658          * this estimate to get closer to what we're really going to use.
2659          */
2660         return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2661 }
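
/*
 * Editor's worked example (sizes are illustrative, not guaranteed by the
 * format): with SKINNY_METADATA each head is costed at
 * sizeof(btrfs_extent_item) + sizeof(btrfs_extent_inline_ref), roughly
 * 24 + 9 = 33 bytes, so one ~16K leaf covers a few hundred heads before
 * the caller doubles the byte count to account for half-empty leaves.
 */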
2662
2663 /*
2664  * Takes the number of bytes to be checksummed and figures out how many leaves it
2665  * would require to store the csums for that many bytes.
2666  */
2667 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2668 {
2669         u64 csum_size;
2670         u64 num_csums_per_leaf;
2671         u64 num_csums;
2672
2673         csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2674         num_csums_per_leaf = div64_u64(csum_size,
2675                         (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2676         num_csums = div64_u64(csum_bytes, root->sectorsize);
2677         num_csums += num_csums_per_leaf - 1;
2678         num_csums = div64_u64(num_csums, num_csums_per_leaf);
2679         return num_csums;
2680 }
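
/*
 * Editor's worked example (hypothetical geometry): with a 16K leaf
 * (~16258 usable bytes once one item header is subtracted), 4-byte
 * crc32c csums and a 4K sectorsize, num_csums_per_leaf is ~4064, so
 * checksumming 1GiB (262144 sectors) needs roughly
 * DIV_ROUND_UP(262144, 4064) = 65 leaves.
 */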
2681
2682 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2683                                        struct btrfs_root *root)
2684 {
2685         struct btrfs_block_rsv *global_rsv;
2686         u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2687         u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2688         u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2689         u64 num_bytes, num_dirty_bgs_bytes;
2690         int ret = 0;
2691
2692         num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2693         num_heads = heads_to_leaves(root, num_heads);
2694         if (num_heads > 1)
2695                 num_bytes += (num_heads - 1) * root->nodesize;
2696         num_bytes <<= 1;
2697         num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2698         num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2699                                                              num_dirty_bgs);
2700         global_rsv = &root->fs_info->global_block_rsv;
2701
2702         /*
2703          * If we can't allocate any more chunks, let's make sure we have _lots_ of
2704          * wiggle room since running delayed refs can create more delayed refs.
2705          */
2706         if (global_rsv->space_info->full) {
2707                 num_dirty_bgs_bytes <<= 1;
2708                 num_bytes <<= 1;
2709         }
2710
2711         spin_lock(&global_rsv->lock);
2712         if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2713                 ret = 1;
2714         spin_unlock(&global_rsv->lock);
2715         return ret;
2716 }
2717
2718 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2719                                        struct btrfs_root *root)
2720 {
2721         struct btrfs_fs_info *fs_info = root->fs_info;
2722         u64 num_entries =
2723                 atomic_read(&trans->transaction->delayed_refs.num_entries);
2724         u64 avg_runtime;
2725         u64 val;
2726
2727         smp_mb();
2728         avg_runtime = fs_info->avg_delayed_ref_runtime;
2729         val = num_entries * avg_runtime;
2730         if (val >= NSEC_PER_SEC)
2731                 return 1;
2732         if (val >= NSEC_PER_SEC / 2)
2733                 return 2;
2734
2735         return btrfs_check_space_for_delayed_refs(trans, root);
2736 }
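
/*
 * Editor's note with illustrative numbers: the returns above mean
 * "throttle hard" once the backlog represents >= 1s of average runtime
 * and "throttle softly" at >= 0.5s.  With avg_runtime = 100000ns,
 * 10000 pending entries already hit the hard limit:
 *
 *      10000 * 100000ns = 1000000000ns = NSEC_PER_SEC
 */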
2737
2738 struct async_delayed_refs {
2739         struct btrfs_root *root;
2740         int count;
2741         int error;
2742         int sync;
2743         struct completion wait;
2744         struct btrfs_work work;
2745 };
2746
2747 static void delayed_ref_async_start(struct btrfs_work *work)
2748 {
2749         struct async_delayed_refs *async;
2750         struct btrfs_trans_handle *trans;
2751         int ret;
2752
2753         async = container_of(work, struct async_delayed_refs, work);
2754
2755         trans = btrfs_join_transaction(async->root);
2756         if (IS_ERR(trans)) {
2757                 async->error = PTR_ERR(trans);
2758                 goto done;
2759         }
2760
2761         /*
2762          * trans->sync means that when we call end_transaction, we won't
2763          * wait on delayed refs
2764          */
2765         trans->sync = true;
2766         ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2767         if (ret)
2768                 async->error = ret;
2769
2770         ret = btrfs_end_transaction(trans, async->root);
2771         if (ret && !async->error)
2772                 async->error = ret;
2773 done:
2774         if (async->sync)
2775                 complete(&async->wait);
2776         else
2777                 kfree(async);
2778 }
2779
2780 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2781                                  unsigned long count, int wait)
2782 {
2783         struct async_delayed_refs *async;
2784         int ret;
2785
2786         async = kmalloc(sizeof(*async), GFP_NOFS);
2787         if (!async)
2788                 return -ENOMEM;
2789
2790         async->root = root->fs_info->tree_root;
2791         async->count = count;
2792         async->error = 0;
2793         if (wait)
2794                 async->sync = 1;
2795         else
2796                 async->sync = 0;
2797         init_completion(&async->wait);
2798
2799         btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2800                         delayed_ref_async_start, NULL, NULL);
2801
2802         btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2803
2804         if (wait) {
2805                 wait_for_completion(&async->wait);
2806                 ret = async->error;
2807                 kfree(async);
2808                 return ret;
2809         }
2810         return 0;
2811 }
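
/*
 * Editor's usage sketch (hypothetical caller): fire and forget 64 refs
 * in the background,
 *
 *      ret = btrfs_async_run_delayed_refs(root, 64, 0);
 *
 * or pass wait == 1 to block on the completion and pick up the worker's
 * error; only a waiting caller ever sees async->error, since the no-wait
 * path frees the struct inside the worker.
 */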
2812
2813 /*
2814  * this starts processing the delayed reference count updates and
2815  * extent insertions we have queued up so far.  count can be
2816  * 0, which means to process everything in the tree at the start
2817  * of the run (but not newly added entries), or it can be some target
2818  * number you'd like to process.
2819  *
2820  * Returns 0 on success or if called with an aborted transaction
2821  * Returns <0 on error and aborts the transaction
2822  */
2823 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2824                            struct btrfs_root *root, unsigned long count)
2825 {
2826         struct rb_node *node;
2827         struct btrfs_delayed_ref_root *delayed_refs;
2828         struct btrfs_delayed_ref_head *head;
2829         int ret;
2830         int run_all = count == (unsigned long)-1;
2831         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2832
2833         /* We'll clean this up in btrfs_cleanup_transaction */
2834         if (trans->aborted)
2835                 return 0;
2836
2837         if (root == root->fs_info->extent_root)
2838                 root = root->fs_info->tree_root;
2839
2840         delayed_refs = &trans->transaction->delayed_refs;
2841         if (count == 0)
2842                 count = atomic_read(&delayed_refs->num_entries) * 2;
2843
2844 again:
2845 #ifdef SCRAMBLE_DELAYED_REFS
2846         delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2847 #endif
2848         trans->can_flush_pending_bgs = false;
2849         ret = __btrfs_run_delayed_refs(trans, root, count);
2850         if (ret < 0) {
2851                 btrfs_abort_transaction(trans, root, ret);
2852                 return ret;
2853         }
2854
2855         if (run_all) {
2856                 if (!list_empty(&trans->new_bgs))
2857                         btrfs_create_pending_block_groups(trans, root);
2858
2859                 spin_lock(&delayed_refs->lock);
2860                 node = rb_first(&delayed_refs->href_root);
2861                 if (!node) {
2862                         spin_unlock(&delayed_refs->lock);
2863                         goto out;
2864                 }
2865                 count = (unsigned long)-1;
2866
2867                 while (node) {
2868                         head = rb_entry(node, struct btrfs_delayed_ref_head,
2869                                         href_node);
2870                         if (btrfs_delayed_ref_is_head(&head->node)) {
2871                                 struct btrfs_delayed_ref_node *ref;
2872
2873                                 ref = &head->node;
2874                                 atomic_inc(&ref->refs);
2875
2876                                 spin_unlock(&delayed_refs->lock);
2877                                 /*
2878                                  * Mutex was contended, block until it's
2879                                  * released and try again
2880                                  */
2881                                 mutex_lock(&head->mutex);
2882                                 mutex_unlock(&head->mutex);
2883
2884                                 btrfs_put_delayed_ref(ref);
2885                                 cond_resched();
2886                                 goto again;
2887                         } else {
2888                                 WARN_ON(1);
2889                         }
2890                         node = rb_next(node);
2891                 }
2892                 spin_unlock(&delayed_refs->lock);
2893                 cond_resched();
2894                 goto again;
2895         }
2896 out:
2897         assert_qgroups_uptodate(trans);
2898         trans->can_flush_pending_bgs = can_flush_pending_bgs;
2899         return 0;
2900 }
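
/*
 * Editor's usage sketch of the count convention described above
 * (hypothetical call sites):
 *
 *      btrfs_run_delayed_refs(trans, root, 32);    process about 32 refs
 *      btrfs_run_delayed_refs(trans, root, 0);     everything queued so far
 *      btrfs_run_delayed_refs(trans, root, (unsigned long)-1);  run_all
 */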
2901
2902 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2903                                 struct btrfs_root *root,
2904                                 u64 bytenr, u64 num_bytes, u64 flags,
2905                                 int level, int is_data)
2906 {
2907         struct btrfs_delayed_extent_op *extent_op;
2908         int ret;
2909
2910         extent_op = btrfs_alloc_delayed_extent_op();
2911         if (!extent_op)
2912                 return -ENOMEM;
2913
2914         extent_op->flags_to_set = flags;
2915         extent_op->update_flags = 1;
2916         extent_op->update_key = 0;
2917         extent_op->is_data = is_data ? 1 : 0;
2918         extent_op->level = level;
2919
2920         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2921                                           num_bytes, extent_op);
2922         if (ret)
2923                 btrfs_free_delayed_extent_op(extent_op);
2924         return ret;
2925 }
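
/*
 * Editor's note: the typical caller (tree block COW, see ctree.c) uses
 * this to switch a block to full backrefs, along the lines of
 *
 *      btrfs_set_disk_extent_flags(trans, root, buf->start, buf->len,
 *                                  BTRFS_BLOCK_FLAG_FULL_BACKREF,
 *                                  btrfs_header_level(buf), 0);
 */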
2926
2927 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2928                                       struct btrfs_root *root,
2929                                       struct btrfs_path *path,
2930                                       u64 objectid, u64 offset, u64 bytenr)
2931 {
2932         struct btrfs_delayed_ref_head *head;
2933         struct btrfs_delayed_ref_node *ref;
2934         struct btrfs_delayed_data_ref *data_ref;
2935         struct btrfs_delayed_ref_root *delayed_refs;
2936         int ret = 0;
2937
2938         delayed_refs = &trans->transaction->delayed_refs;
2939         spin_lock(&delayed_refs->lock);
2940         head = btrfs_find_delayed_ref_head(trans, bytenr);
2941         if (!head) {
2942                 spin_unlock(&delayed_refs->lock);
2943                 return 0;
2944         }
2945
2946         if (!mutex_trylock(&head->mutex)) {
2947                 atomic_inc(&head->node.refs);
2948                 spin_unlock(&delayed_refs->lock);
2949
2950                 btrfs_release_path(path);
2951
2952                 /*
2953                  * Mutex was contended, block until it's released and let
2954                  * caller try again
2955                  */
2956                 mutex_lock(&head->mutex);
2957                 mutex_unlock(&head->mutex);
2958                 btrfs_put_delayed_ref(&head->node);
2959                 return -EAGAIN;
2960         }
2961         spin_unlock(&delayed_refs->lock);
2962
2963         spin_lock(&head->lock);
2964         list_for_each_entry(ref, &head->ref_list, list) {
2965                 /* If it's a shared ref we know a cross reference exists */
2966                 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
2967                         ret = 1;
2968                         break;
2969                 }
2970
2971                 data_ref = btrfs_delayed_node_to_data_ref(ref);
2972
2973                 /*
2974                  * If our ref doesn't match the one we're currently looking at
2975                  * then we have a cross reference.
2976                  */
2977                 if (data_ref->root != root->root_key.objectid ||
2978                     data_ref->objectid != objectid ||
2979                     data_ref->offset != offset) {
2980                         ret = 1;
2981                         break;
2982                 }
2983         }
2984         spin_unlock(&head->lock);
2985         mutex_unlock(&head->mutex);
2986         return ret;
2987 }
2988
2989 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2990                                         struct btrfs_root *root,
2991                                         struct btrfs_path *path,
2992                                         u64 objectid, u64 offset, u64 bytenr)
2993 {
2994         struct btrfs_root *extent_root = root->fs_info->extent_root;
2995         struct extent_buffer *leaf;
2996         struct btrfs_extent_data_ref *ref;
2997         struct btrfs_extent_inline_ref *iref;
2998         struct btrfs_extent_item *ei;
2999         struct btrfs_key key;
3000         u32 item_size;
3001         int ret;
3002
3003         key.objectid = bytenr;
3004         key.offset = (u64)-1;
3005         key.type = BTRFS_EXTENT_ITEM_KEY;
3006
3007         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3008         if (ret < 0)
3009                 goto out;
3010         BUG_ON(ret == 0); /* Corruption */
3011
3012         ret = -ENOENT;
3013         if (path->slots[0] == 0)
3014                 goto out;
3015
3016         path->slots[0]--;
3017         leaf = path->nodes[0];
3018         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3019
3020         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3021                 goto out;
3022
3023         ret = 1;
3024         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3025 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3026         if (item_size < sizeof(*ei)) {
3027                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3028                 goto out;
3029         }
3030 #endif
3031         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3032
3033         if (item_size != sizeof(*ei) +
3034             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3035                 goto out;
3036
3037         if (btrfs_extent_generation(leaf, ei) <=
3038             btrfs_root_last_snapshot(&root->root_item))
3039                 goto out;
3040
3041         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3042         if (btrfs_extent_inline_ref_type(leaf, iref) !=
3043             BTRFS_EXTENT_DATA_REF_KEY)
3044                 goto out;
3045
3046         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3047         if (btrfs_extent_refs(leaf, ei) !=
3048             btrfs_extent_data_ref_count(leaf, ref) ||
3049             btrfs_extent_data_ref_root(leaf, ref) !=
3050             root->root_key.objectid ||
3051             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3052             btrfs_extent_data_ref_offset(leaf, ref) != offset)
3053                 goto out;
3054
3055         ret = 0;
3056 out:
3057         return ret;
3058 }
3059
3060 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3061                           struct btrfs_root *root,
3062                           u64 objectid, u64 offset, u64 bytenr)
3063 {
3064         struct btrfs_path *path;
3065         int ret;
3066         int ret2;
3067
3068         path = btrfs_alloc_path();
3069         if (!path)
3070                 return -ENOMEM;
3071
3072         do {
3073                 ret = check_committed_ref(trans, root, path, objectid,
3074                                           offset, bytenr);
3075                 if (ret && ret != -ENOENT)
3076                         goto out;
3077
3078                 ret2 = check_delayed_ref(trans, root, path, objectid,
3079                                          offset, bytenr);
3080         } while (ret2 == -EAGAIN);
3081
3082         if (ret2 && ret2 != -ENOENT) {
3083                 ret = ret2;
3084                 goto out;
3085         }
3086
3087         if (ret != -ENOENT || ret2 != -ENOENT)
3088                 ret = 0;
3089 out:
3090         btrfs_free_path(path);
3091         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3092                 WARN_ON(ret > 0);
3093         return ret;
3094 }
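
/*
 * Editor's note on the return convention: > 0 means a cross reference
 * exists (or could not be ruled out), 0 means the extent is provably
 * owned only by this root/objectid/offset, and < 0 is an error.  The
 * nocow write path relies on this to decide whether overwriting an
 * extent in place is safe.
 */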
3095
3096 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3097                            struct btrfs_root *root,
3098                            struct extent_buffer *buf,
3099                            int full_backref, int inc)
3100 {
3101         u64 bytenr;
3102         u64 num_bytes;
3103         u64 parent;
3104         u64 ref_root;
3105         u32 nritems;
3106         struct btrfs_key key;
3107         struct btrfs_file_extent_item *fi;
3108         int i;
3109         int level;
3110         int ret = 0;
3111         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3112                             u64, u64, u64, u64, u64, u64, int);
3114
3115         if (btrfs_test_is_dummy_root(root))
3116                 return 0;
3117
3118         ref_root = btrfs_header_owner(buf);
3119         nritems = btrfs_header_nritems(buf);
3120         level = btrfs_header_level(buf);
3121
3122         if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3123                 return 0;
3124
3125         if (inc)
3126                 process_func = btrfs_inc_extent_ref;
3127         else
3128                 process_func = btrfs_free_extent;
3129
3130         if (full_backref)
3131                 parent = buf->start;
3132         else
3133                 parent = 0;
3134
3135         for (i = 0; i < nritems; i++) {
3136                 if (level == 0) {
3137                         btrfs_item_key_to_cpu(buf, &key, i);
3138                         if (key.type != BTRFS_EXTENT_DATA_KEY)
3139                                 continue;
3140                         fi = btrfs_item_ptr(buf, i,
3141                                             struct btrfs_file_extent_item);
3142                         if (btrfs_file_extent_type(buf, fi) ==
3143                             BTRFS_FILE_EXTENT_INLINE)
3144                                 continue;
3145                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3146                         if (bytenr == 0)
3147                                 continue;
3148
3149                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3150                         key.offset -= btrfs_file_extent_offset(buf, fi);
3151                         ret = process_func(trans, root, bytenr, num_bytes,
3152                                            parent, ref_root, key.objectid,
3153                                            key.offset, 1);
3154                         if (ret)
3155                                 goto fail;
3156                 } else {
3157                         bytenr = btrfs_node_blockptr(buf, i);
3158                         num_bytes = root->nodesize;
3159                         ret = process_func(trans, root, bytenr, num_bytes,
3160                                            parent, ref_root, level - 1, 0,
3161                                            1);
3162                         if (ret)
3163                                 goto fail;
3164                 }
3165         }
3166         return 0;
3167 fail:
3168         return ret;
3169 }
3170
3171 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3172                   struct extent_buffer *buf, int full_backref)
3173 {
3174         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3175 }
3176
3177 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3178                   struct extent_buffer *buf, int full_backref)
3179 {
3180         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3181 }
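
/*
 * Editor's usage sketch: these wrappers do the reference bookkeeping
 * when a tree block is COWed.  A caller keeping shared children alive
 * does something like (hypothetical, error handling elided):
 *
 *      ret = btrfs_inc_ref(trans, root, cow, 1);   new copy, full backref
 *      ret = btrfs_dec_ref(trans, root, buf, 1);   drop the old buffer's refs
 */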
3182
3183 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3184                                  struct btrfs_root *root,
3185                                  struct btrfs_path *path,
3186                                  struct btrfs_block_group_cache *cache)
3187 {
3188         int ret;
3189         struct btrfs_root *extent_root = root->fs_info->extent_root;
3190         unsigned long bi;
3191         struct extent_buffer *leaf;
3192
3193         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3194         if (ret) {
3195                 if (ret > 0)
3196                         ret = -ENOENT;
3197                 goto fail;
3198         }
3199
3200         leaf = path->nodes[0];
3201         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3202         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3203         btrfs_mark_buffer_dirty(leaf);
3204 fail:
3205         btrfs_release_path(path);
3206         return ret;
3208 }
3209
3210 static struct btrfs_block_group_cache *
3211 next_block_group(struct btrfs_root *root,
3212                  struct btrfs_block_group_cache *cache)
3213 {
3214         struct rb_node *node;
3215
3216         spin_lock(&root->fs_info->block_group_cache_lock);
3217
3218         /* If our block group was removed, we need a full search. */
3219         if (RB_EMPTY_NODE(&cache->cache_node)) {
3220                 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3221
3222                 spin_unlock(&root->fs_info->block_group_cache_lock);
3223                 btrfs_put_block_group(cache);
3224                 cache = btrfs_lookup_first_block_group(root->fs_info,
3225                                                        next_bytenr);
3226                 return cache;
3227         }
3228         node = rb_next(&cache->cache_node);
3229         btrfs_put_block_group(cache);
3230         if (node) {
3231                 cache = rb_entry(node, struct btrfs_block_group_cache,
3232                                  cache_node);
3233                 btrfs_get_block_group(cache);
3234         } else
3235                 cache = NULL;
3236         spin_unlock(&root->fs_info->block_group_cache_lock);
3237         return cache;
3238 }
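
/*
 * Editor's sketch of the iteration pattern this helper enables
 * (hypothetical caller, error handling elided):
 *
 *      cache = btrfs_lookup_first_block_group(fs_info, 0);
 *      while (cache) {
 *              ... inspect cache ...
 *              cache = next_block_group(root, cache);
 *      }
 *
 * Each call consumes the reference on the group passed in and returns
 * the next group with a reference already held.
 */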
3239
3240 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3241                             struct btrfs_trans_handle *trans,
3242                             struct btrfs_path *path)
3243 {
3244         struct btrfs_root *root = block_group->fs_info->tree_root;
3245         struct inode *inode = NULL;
3246         u64 alloc_hint = 0;
3247         int dcs = BTRFS_DC_ERROR;
3248         u64 num_pages = 0;
3249         int retries = 0;
3250         int ret = 0;
3251
3252         /*
3253          * If this block group is smaller than 100 megs don't bother caching the
3254          * block group.
3255          */
3256         if (block_group->key.offset < (100 * 1024 * 1024)) {
3257                 spin_lock(&block_group->lock);
3258                 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3259                 spin_unlock(&block_group->lock);
3260                 return 0;
3261         }
3262
3263         if (trans->aborted)
3264                 return 0;
3265 again:
3266         inode = lookup_free_space_inode(root, block_group, path);
3267         if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3268                 ret = PTR_ERR(inode);
3269                 btrfs_release_path(path);
3270                 goto out;
3271         }
3272
3273         if (IS_ERR(inode)) {
3274                 BUG_ON(retries);
3275                 retries++;
3276
3277                 if (block_group->ro)
3278                         goto out_free;
3279
3280                 ret = create_free_space_inode(root, trans, block_group, path);
3281                 if (ret)
3282                         goto out_free;
3283                 goto again;
3284         }
3285
3286         /* We've already set up this transaction, go ahead and exit */
3287         if (block_group->cache_generation == trans->transid &&
3288             i_size_read(inode)) {
3289                 dcs = BTRFS_DC_SETUP;
3290                 goto out_put;
3291         }
3292
3293         /*
3294          * We want to set the generation to 0, that way if anything goes wrong
3295          * from here on out we know not to trust this cache when we load up next
3296          * time.
3297          */
3298         BTRFS_I(inode)->generation = 0;
3299         ret = btrfs_update_inode(trans, root, inode);
3300         if (ret) {
3301                 /*
3302                  * So theoretically we could recover from this, simply set the
3303                  * super cache generation to 0 so we know to invalidate the
3304                  * cache, but then we'd have to keep track of the block groups
3305                  * that fail this way so we know we _have_ to reset this cache
3306                  * before the next commit or risk reading stale cache.  So to
3307                  * limit our exposure to horrible edge cases, let's just abort the
3308                  * transaction; this only happens in really bad situations
3309                  * anyway.
3310                  */
3311                 btrfs_abort_transaction(trans, root, ret);
3312                 goto out_put;
3313         }
3314         WARN_ON(ret);
3315
3316         if (i_size_read(inode) > 0) {
3317                 ret = btrfs_check_trunc_cache_free_space(root,
3318                                         &root->fs_info->global_block_rsv);
3319                 if (ret)
3320                         goto out_put;
3321
3322                 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3323                 if (ret)
3324                         goto out_put;
3325         }
3326
3327         spin_lock(&block_group->lock);
3328         if (block_group->cached != BTRFS_CACHE_FINISHED ||
3329             !btrfs_test_opt(root, SPACE_CACHE)) {
3330                 /*
3331                  * don't bother trying to write stuff out _if_
3332                  * a) we're not cached,
3333                  * b) we were mounted with the nospace_cache option.
3334                  */
3335                 dcs = BTRFS_DC_WRITTEN;
3336                 spin_unlock(&block_group->lock);
3337                 goto out_put;
3338         }
3339         spin_unlock(&block_group->lock);
3340
3341         /*
3342          * Try to preallocate enough space based on how big the block group is.
3343          * Keep in mind this has to include any pinned space which could end up
3344          * taking up quite a bit since it's not folded into the other space
3345          * cache.
3346          */
3347         num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
3348         if (!num_pages)
3349                 num_pages = 1;
3350
3351         num_pages *= 16;
3352         num_pages *= PAGE_CACHE_SIZE;
3353
3354         ret = btrfs_check_data_free_space(inode, num_pages, num_pages);
3355         if (ret)
3356                 goto out_put;
3357
3358         ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3359                                               num_pages, num_pages,
3360                                               &alloc_hint);
3361         if (!ret)
3362                 dcs = BTRFS_DC_SETUP;
3363         btrfs_free_reserved_data_space(inode, num_pages);
3364
3365 out_put:
3366         iput(inode);
3367 out_free:
3368         btrfs_release_path(path);
3369 out:
3370         spin_lock(&block_group->lock);
3371         if (!ret && dcs == BTRFS_DC_SETUP)
3372                 block_group->cache_generation = trans->transid;
3373         block_group->disk_cache_state = dcs;
3374         spin_unlock(&block_group->lock);
3375
3376         return ret;
3377 }
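
/*
 * Editor's worked example for the preallocation sizing above
 * (illustrative numbers): a 1GiB block group gives
 * div_u64(1GiB, 256MiB) = 4, times 16 pages, times a 4K
 * PAGE_CACHE_SIZE, i.e. 256K preallocated for its free space cache.
 */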
3378
3379 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3380                             struct btrfs_root *root)
3381 {
3382         struct btrfs_block_group_cache *cache, *tmp;
3383         struct btrfs_transaction *cur_trans = trans->transaction;
3384         struct btrfs_path *path;
3385
3386         if (list_empty(&cur_trans->dirty_bgs) ||
3387             !btrfs_test_opt(root, SPACE_CACHE))
3388                 return 0;
3389
3390         path = btrfs_alloc_path();
3391         if (!path)
3392                 return -ENOMEM;
3393
3394         /* Could add new block groups, use _safe just in case */
3395         list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3396                                  dirty_list) {
3397                 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3398                         cache_save_setup(cache, trans, path);
3399         }
3400
3401         btrfs_free_path(path);
3402         return 0;
3403 }
3404
3405 /*
3406  * transaction commit does final block group cache writeback during a
3407  * critical section where nothing is allowed to change the FS.  This is
3408  * required in order for the cache to actually match the block group,
3409  * but can introduce a lot of latency into the commit.
3410  *
3411  * So, btrfs_start_dirty_block_groups is here to kick off block group
3412  * cache IO.  There's a chance we'll have to redo some of it if the
3413  * block group changes again during the commit, but it greatly reduces
3414  * the commit latency by getting rid of the easy block groups while
3415  * we're still allowing others to join the commit.
3416  */
3417 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3418                                    struct btrfs_root *root)
3419 {
3420         struct btrfs_block_group_cache *cache;
3421         struct btrfs_transaction *cur_trans = trans->transaction;
3422         int ret = 0;
3423         int should_put;
3424         struct btrfs_path *path = NULL;
3425         LIST_HEAD(dirty);
3426         struct list_head *io = &cur_trans->io_bgs;
3427         int num_started = 0;
3428         int loops = 0;
3429
3430         spin_lock(&cur_trans->dirty_bgs_lock);
3431         if (list_empty(&cur_trans->dirty_bgs)) {
3432                 spin_unlock(&cur_trans->dirty_bgs_lock);
3433                 return 0;
3434         }
3435         list_splice_init(&cur_trans->dirty_bgs, &dirty);
3436         spin_unlock(&cur_trans->dirty_bgs_lock);
3437
3438 again:
3439         /*
3440          * make sure all the block groups on our dirty list actually
3441          * exist
3442          */
3443         btrfs_create_pending_block_groups(trans, root);
3444
3445         if (!path) {
3446                 path = btrfs_alloc_path();
3447                 if (!path)
3448                         return -ENOMEM;
3449         }
3450
3451         /*
3452          * cache_write_mutex is here only to save us from balance or automatic
3453          * removal of empty block groups deleting this block group while we are
3454          * writing out the cache
3455          */
3456         mutex_lock(&trans->transaction->cache_write_mutex);
3457         while (!list_empty(&dirty)) {
3458                 cache = list_first_entry(&dirty,
3459                                          struct btrfs_block_group_cache,
3460                                          dirty_list);
3461                 /*
3462                  * this can happen if something re-dirties a block
3463                  * group that is already under IO.  Just wait for it to
3464                  * finish and then do it all again
3465                  */
3466                 if (!list_empty(&cache->io_list)) {
3467                         list_del_init(&cache->io_list);
3468                         btrfs_wait_cache_io(root, trans, cache,
3469                                             &cache->io_ctl, path,
3470                                             cache->key.objectid);
3471                         btrfs_put_block_group(cache);
3472                 }
3474
3475                 /*
3476                  * btrfs_wait_cache_io uses the cache->dirty_list to decide
3477                  * if it should update the cache_state.  Don't delete
3478                  * until after we wait.
3479                  *
3480                  * Since we're not running in the commit critical section
3481                  * we need the dirty_bgs_lock to protect from update_block_group
3482                  */
3483                 spin_lock(&cur_trans->dirty_bgs_lock);
3484                 list_del_init(&cache->dirty_list);
3485                 spin_unlock(&cur_trans->dirty_bgs_lock);
3486
3487                 should_put = 1;
3488
3489                 cache_save_setup(cache, trans, path);
3490
3491                 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3492                         cache->io_ctl.inode = NULL;
3493                         ret = btrfs_write_out_cache(root, trans, cache, path);
3494                         if (ret == 0 && cache->io_ctl.inode) {
3495                                 num_started++;
3496                                 should_put = 0;
3497
3498                                 /*
3499                                  * the cache_write_mutex is protecting
3500                                  * the io_list
3501                                  */
3502                                 list_add_tail(&cache->io_list, io);
3503                         } else {
3504                                 /*
3505                                  * if we failed to write the cache, the
3506                                  * generation will be bad and life goes on
3507                                  */
3508                                 ret = 0;
3509                         }
3510                 }
3511                 if (!ret) {
3512                         ret = write_one_cache_group(trans, root, path, cache);
3513                         /*
3514                          * Our block group might still be attached to the list
3515                          * of new block groups in the transaction handle of some
3516                          * other task (struct btrfs_trans_handle->new_bgs). This
3517                          * means its block group item isn't yet in the extent
3518                          * tree. If this happens ignore the error, as we will
3519                          * try again later in the critical section of the
3520                          * transaction commit.
3521                          */
3522                         if (ret == -ENOENT) {
3523                                 ret = 0;
3524                                 spin_lock(&cur_trans->dirty_bgs_lock);
3525                                 if (list_empty(&cache->dirty_list)) {
3526                                         list_add_tail(&cache->dirty_list,
3527                                                       &cur_trans->dirty_bgs);
3528                                         btrfs_get_block_group(cache);
3529                                 }
3530                                 spin_unlock(&cur_trans->dirty_bgs_lock);
3531                         } else if (ret) {
3532                                 btrfs_abort_transaction(trans, root, ret);
3533                         }
3534                 }
3535
3536                 /* if it's not on the io list, we need to put the block group */
3537                 if (should_put)
3538                         btrfs_put_block_group(cache);
3539
3540                 if (ret)
3541                         break;
3542
3543                 /*
3544                  * Avoid blocking other tasks for too long. It might even save
3545                  * us from writing caches for block groups that are going to be
3546                  * removed.
3547                  */
3548                 mutex_unlock(&trans->transaction->cache_write_mutex);
3549                 mutex_lock(&trans->transaction->cache_write_mutex);
3550         }
3551         mutex_unlock(&trans->transaction->cache_write_mutex);
3552
3553         /*
3554          * go through delayed refs for all the stuff we've just kicked off
3555          * and then loop back (just once)
3556          */
3557         ret = btrfs_run_delayed_refs(trans, root, 0);
3558         if (!ret && loops == 0) {
3559                 loops++;
3560                 spin_lock(&cur_trans->dirty_bgs_lock);
3561                 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3562                 /*
3563                  * dirty_bgs_lock protects us from concurrent block group
3564                  * deletes too (not just cache_write_mutex).
3565                  */
3566                 if (!list_empty(&dirty)) {
3567                         spin_unlock(&cur_trans->dirty_bgs_lock);
3568                         goto again;
3569                 }
3570                 spin_unlock(&cur_trans->dirty_bgs_lock);
3571         }
3572
3573         btrfs_free_path(path);
3574         return ret;
3575 }
3576
3577 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3578                                    struct btrfs_root *root)
3579 {
3580         struct btrfs_block_group_cache *cache;
3581         struct btrfs_transaction *cur_trans = trans->transaction;
3582         int ret = 0;
3583         int should_put;
3584         struct btrfs_path *path;
3585         struct list_head *io = &cur_trans->io_bgs;
3586         int num_started = 0;
3587
3588         path = btrfs_alloc_path();
3589         if (!path)
3590                 return -ENOMEM;
3591
3592         /*
3593          * We don't need the lock here since we are protected by the transaction
3594          * commit.  We want to do the cache_save_setup first and then run the
3595          * delayed refs to make sure we have the best chance at doing this all
3596          * in one shot.
3597          */
3598         while (!list_empty(&cur_trans->dirty_bgs)) {
3599                 cache = list_first_entry(&cur_trans->dirty_bgs,
3600                                          struct btrfs_block_group_cache,
3601                                          dirty_list);
3602
3603                 /*
3604                  * this can happen if cache_save_setup re-dirties a block
3605                  * group that is already under IO.  Just wait for it to
3606                  * finish and then do it all again
3607                  */
3608                 if (!list_empty(&cache->io_list)) {
3609                         list_del_init(&cache->io_list);
3610                         btrfs_wait_cache_io(root, trans, cache,
3611                                             &cache->io_ctl, path,
3612                                             cache->key.objectid);
3613                         btrfs_put_block_group(cache);
3614                 }
3615
3616                 /*
3617                  * don't remove from the dirty list until after we've waited
3618                  * on any pending IO
3619                  */
3620                 list_del_init(&cache->dirty_list);
3621                 should_put = 1;
3622
3623                 cache_save_setup(cache, trans, path);
3624
3625                 if (!ret)
3626                         ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3627
3628                 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3629                         cache->io_ctl.inode = NULL;
3630                         ret = btrfs_write_out_cache(root, trans, cache, path);
3631                         if (ret == 0 && cache->io_ctl.inode) {
3632                                 num_started++;
3633                                 should_put = 0;
3634                                 list_add_tail(&cache->io_list, io);
3635                         } else {
3636                                 /*
3637                                  * if we failed to write the cache, the
3638                                  * generation will be bad and life goes on
3639                                  */
3640                                 ret = 0;
3641                         }
3642                 }
3643                 if (!ret) {
3644                         ret = write_one_cache_group(trans, root, path, cache);
3645                         if (ret)
3646                                 btrfs_abort_transaction(trans, root, ret);
3647                 }
3648
3649                 /* if it's not on the io list, we need to put the block group */
3650                 if (should_put)
3651                         btrfs_put_block_group(cache);
3652         }
3653
3654         while (!list_empty(io)) {
3655                 cache = list_first_entry(io, struct btrfs_block_group_cache,
3656                                          io_list);
3657                 list_del_init(&cache->io_list);
3658                 btrfs_wait_cache_io(root, trans, cache,
3659                                     &cache->io_ctl, path, cache->key.objectid);
3660                 btrfs_put_block_group(cache);
3661         }
3662
3663         btrfs_free_path(path);
3664         return ret;
3665 }
3666
3667 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3668 {
3669         struct btrfs_block_group_cache *block_group;
3670         int readonly = 0;
3671
3672         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3673         if (!block_group || block_group->ro)
3674                 readonly = 1;
3675         if (block_group)
3676                 btrfs_put_block_group(block_group);
3677         return readonly;
3678 }
3679
3680 static const char *alloc_name(u64 flags)
3681 {
3682         switch (flags) {
3683         case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3684                 return "mixed";
3685         case BTRFS_BLOCK_GROUP_METADATA:
3686                 return "metadata";
3687         case BTRFS_BLOCK_GROUP_DATA:
3688                 return "data";
3689         case BTRFS_BLOCK_GROUP_SYSTEM:
3690                 return "system";
3691         default:
3692                 WARN_ON(1);
3693                 return "invalid-combination";
3694         }
3695 }
3696
3697 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3698                              u64 total_bytes, u64 bytes_used,
3699                              struct btrfs_space_info **space_info)
3700 {
3701         struct btrfs_space_info *found;
3702         int i;
3703         int factor;
3704         int ret;
3705
3706         if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3707                      BTRFS_BLOCK_GROUP_RAID10))
3708                 factor = 2;
3709         else
3710                 factor = 1;
3711
3712         found = __find_space_info(info, flags);
3713         if (found) {
3714                 spin_lock(&found->lock);
3715                 found->total_bytes += total_bytes;
3716                 found->disk_total += total_bytes * factor;
3717                 found->bytes_used += bytes_used;
3718                 found->disk_used += bytes_used * factor;
3719                 if (total_bytes > 0)
3720                         found->full = 0;
3721                 spin_unlock(&found->lock);
3722                 *space_info = found;
3723                 return 0;
3724         }
3725         found = kzalloc(sizeof(*found), GFP_NOFS);
3726         if (!found)
3727                 return -ENOMEM;
3728
3729         ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3730         if (ret) {
3731                 kfree(found);
3732                 return ret;
3733         }
3734
3735         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3736                 INIT_LIST_HEAD(&found->block_groups[i]);
3737         init_rwsem(&found->groups_sem);
3738         spin_lock_init(&found->lock);
3739         found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3740         found->total_bytes = total_bytes;
3741         found->disk_total = total_bytes * factor;
3742         found->bytes_used = bytes_used;
3743         found->disk_used = bytes_used * factor;
3744         found->bytes_pinned = 0;
3745         found->bytes_reserved = 0;
3746         found->bytes_readonly = 0;
3747         found->bytes_may_use = 0;
3748         found->full = 0;
3749         found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3750         found->chunk_alloc = 0;
3751         found->flush = 0;
3752         init_waitqueue_head(&found->wait);
3753         INIT_LIST_HEAD(&found->ro_bgs);
3754
3755         ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3756                                     info->space_info_kobj, "%s",
3757                                     alloc_name(found->flags));
3758         if (ret) {
3759                 kfree(found);
3760                 return ret;
3761         }
3762
3763         *space_info = found;
3764         list_add_rcu(&found->list, &info->space_info);
3765         if (flags & BTRFS_BLOCK_GROUP_DATA)
3766                 info->data_sinfo = found;
3767
3768         return ret;
3769 }
3770
3771 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3772 {
3773         u64 extra_flags = chunk_to_extended(flags) &
3774                                 BTRFS_EXTENDED_PROFILE_MASK;
3775
3776         write_seqlock(&fs_info->profiles_lock);
3777         if (flags & BTRFS_BLOCK_GROUP_DATA)
3778                 fs_info->avail_data_alloc_bits |= extra_flags;
3779         if (flags & BTRFS_BLOCK_GROUP_METADATA)
3780                 fs_info->avail_metadata_alloc_bits |= extra_flags;
3781         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3782                 fs_info->avail_system_alloc_bits |= extra_flags;
3783         write_sequnlock(&fs_info->profiles_lock);
3784 }
3785
3786 /*
3787  * returns target flags in extended format or 0 if restripe for this
3788  * chunk_type is not in progress
3789  *
3790  * should be called with either volume_mutex or balance_lock held
3791  */
3792 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3793 {
3794         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3795         u64 target = 0;
3796
3797         if (!bctl)
3798                 return 0;
3799
3800         if (flags & BTRFS_BLOCK_GROUP_DATA &&
3801             bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3802                 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3803         } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3804                    bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3805                 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3806         } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3807                    bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3808                 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3809         }
3810
3811         return target;
3812 }
3813
3814 /*
3815  * @flags: available profiles in extended format (see ctree.h)
3816  *
3817  * Returns reduced profile in chunk format.  If profile changing is in
3818  * progress (either running or paused) picks the target profile (if it's
3819  * already available), otherwise falls back to plain reducing.
3820  */
3821 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3822 {
3823         u64 num_devices = root->fs_info->fs_devices->rw_devices;
3824         u64 target;
3825         u64 tmp;
3826
3827         /*
3828          * see if restripe for this chunk_type is in progress, if so
3829          * try to reduce to the target profile
3830          */
3831         spin_lock(&root->fs_info->balance_lock);
3832         target = get_restripe_target(root->fs_info, flags);
3833         if (target) {
3834                 /* pick target profile only if it's already available */
3835                 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3836                         spin_unlock(&root->fs_info->balance_lock);
3837                         return extended_to_chunk(target);
3838                 }
3839         }
3840         spin_unlock(&root->fs_info->balance_lock);
3841
3842         /* First, mask out the RAID levels which aren't possible */
3843         if (num_devices == 1)
3844                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
3845                            BTRFS_BLOCK_GROUP_RAID5);
3846         if (num_devices < 3)
3847                 flags &= ~BTRFS_BLOCK_GROUP_RAID6;
3848         if (num_devices < 4)
3849                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3850
3851         tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
3852                        BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
3853                        BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
3854         flags &= ~tmp;
3855
3856         if (tmp & BTRFS_BLOCK_GROUP_RAID6)
3857                 tmp = BTRFS_BLOCK_GROUP_RAID6;
3858         else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
3859                 tmp = BTRFS_BLOCK_GROUP_RAID5;
3860         else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
3861                 tmp = BTRFS_BLOCK_GROUP_RAID10;
3862         else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
3863                 tmp = BTRFS_BLOCK_GROUP_RAID1;
3864         else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
3865                 tmp = BTRFS_BLOCK_GROUP_RAID0;
3866
3867         return extended_to_chunk(flags | tmp);
3868 }
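
/*
 * Editor's worked example (hypothetical input): with two rw devices and
 * flags = DATA | RAID0 | RAID1 | RAID6, the device-count masking drops
 * RAID6 (needs >= 3 devices), and the preference ladder then picks
 * RAID1 over RAID0, so the result is DATA | RAID1 in chunk format.
 */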
3869
3870 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3871 {
3872         unsigned seq;
3873         u64 flags;
3874
3875         do {
3876                 flags = orig_flags;
3877                 seq = read_seqbegin(&root->fs_info->profiles_lock);
3878
3879                 if (flags & BTRFS_BLOCK_GROUP_DATA)
3880                         flags |= root->fs_info->avail_data_alloc_bits;
3881                 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3882                         flags |= root->fs_info->avail_system_alloc_bits;
3883                 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3884                         flags |= root->fs_info->avail_metadata_alloc_bits;
3885         } while (read_seqretry(&root->fs_info->profiles_lock, seq));
3886
3887         return btrfs_reduce_alloc_profile(root, flags);
3888 }
3889
3890 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3891 {
3892         u64 flags;
3893         u64 ret;
3894
3895         if (data)
3896                 flags = BTRFS_BLOCK_GROUP_DATA;
3897         else if (root == root->fs_info->chunk_root)
3898                 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3899         else
3900                 flags = BTRFS_BLOCK_GROUP_METADATA;
3901
3902         ret = get_alloc_profile(root, flags);
3903         return ret;
3904 }
3905
3906 /*
3907  * This will check the space that the inode allocates from to make sure we have
3908  * enough space for bytes.
3909  */
3910 int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
3911 {
3912         struct btrfs_space_info *data_sinfo;
3913         struct btrfs_root *root = BTRFS_I(inode)->root;
3914         struct btrfs_fs_info *fs_info = root->fs_info;
3915         u64 used;
3916         int ret = 0;
3917         int need_commit = 2;
3918         int have_pinned_space;
3919
3920         /* make sure bytes are sectorsize aligned */
3921         bytes = ALIGN(bytes, root->sectorsize);
3922
3923         if (btrfs_is_free_space_inode(inode)) {
3924                 need_commit = 0;
3925                 ASSERT(current->journal_info);
3926         }
3927
3928         data_sinfo = fs_info->data_sinfo;
3929         if (!data_sinfo)
3930                 goto alloc;
3931
3932 again:
3933         /* make sure we have enough space to handle the data first */
3934         spin_lock(&data_sinfo->lock);
3935         used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3936                 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3937                 data_sinfo->bytes_may_use;
3938
3939         if (used + bytes > data_sinfo->total_bytes) {
3940                 struct btrfs_trans_handle *trans;
3941
3942                 /*
3943                  * if we don't have enough free bytes in this space then we need
3944                  * to alloc a new chunk.
3945                  */
3946                 if (!data_sinfo->full) {
3947                         u64 alloc_target;
3948
3949                         data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3950                         spin_unlock(&data_sinfo->lock);
3951 alloc:
3952                         alloc_target = btrfs_get_alloc_profile(root, 1);
3953                         /*
3954                          * It is ugly that we don't call nolock join
3955                          * transaction for the free space inode case here.
3956                          * But it is safe because we only do the data space
3957                          * reservation for the free space cache in the
3958                          * transaction context; the common join transaction
3959                          * just increases the counter of the current transaction
3960                          * handle and doesn't try to acquire the trans_lock of
3961                          * the fs.
3962                          */
3963                         trans = btrfs_join_transaction(root);
3964                         if (IS_ERR(trans))
3965                                 return PTR_ERR(trans);
3966
3967                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3968                                              alloc_target,
3969                                              CHUNK_ALLOC_NO_FORCE);
3970                         btrfs_end_transaction(trans, root);
3971                         if (ret < 0) {
3972                                 if (ret != -ENOSPC)
3973                                         return ret;
3974                                 else {
3975                                         have_pinned_space = 1;
3976                                         goto commit_trans;
3977                                 }
3978                         }
3979
3980                         if (!data_sinfo)
3981                                 data_sinfo = fs_info->data_sinfo;
3982
3983                         goto again;
3984                 }
3985
3986                 /*
3987                  * If we don't have enough pinned space to deal with this
3988                  * allocation, and no chunk was removed in the current transaction,
3989                  * don't bother committing the transaction.
3990                  */
3991                 have_pinned_space = percpu_counter_compare(
3992                         &data_sinfo->total_bytes_pinned,
3993                         used + bytes - data_sinfo->total_bytes);
3994                 spin_unlock(&data_sinfo->lock);
3995
3996                 /* commit the current transaction and try again */
3997 commit_trans:
3998                 if (need_commit &&
3999                     !atomic_read(&root->fs_info->open_ioctl_trans)) {
4000                         need_commit--;
4001
4002                         if (need_commit > 0)
4003                                 btrfs_wait_ordered_roots(fs_info, -1);
4004
4005                         trans = btrfs_join_transaction(root);
4006                         if (IS_ERR(trans))
4007                                 return PTR_ERR(trans);
4008                         if (have_pinned_space >= 0 ||
4009                             trans->transaction->have_free_bgs ||
4010                             need_commit > 0) {
4011                                 ret = btrfs_commit_transaction(trans, root);
4012                                 if (ret)
4013                                         return ret;
4014                                 /*
4015                                  * make sure that all running delayed iputs are
4016                                  * done
4017                                  */
4018                                 down_write(&root->fs_info->delayed_iput_sem);
4019                                 up_write(&root->fs_info->delayed_iput_sem);
4020                                 goto again;
4021                         } else {
4022                                 btrfs_end_transaction(trans, root);
4023                         }
4024                 }
4025
4026                 trace_btrfs_space_reservation(root->fs_info,
4027                                               "space_info:enospc",
4028                                               data_sinfo->flags, bytes, 1);
4029                 return -ENOSPC;
4030         }
4031         ret = btrfs_qgroup_reserve(root, write_bytes);
4032         if (ret)
4033                 goto out;
4034         data_sinfo->bytes_may_use += bytes;
4035         trace_btrfs_space_reservation(root->fs_info, "space_info",
4036                                       data_sinfo->flags, bytes, 1);
4037 out:
4038         spin_unlock(&data_sinfo->lock);
4039
4040         return ret;
4041 }
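
/*
 * Illustration of the ENOSPC escalation order above, as a minimal
 * standalone sketch.  All helpers here are hypothetical stand-ins for the
 * real code: on a shortfall we first force a data chunk allocation, then
 * fall back to committing the running transaction at most twice, waiting
 * for ordered extents before the first commit so that their pinned space
 * can actually be freed by it.
 */
#if 0
static int data_reserve_sketch(u64 bytes)
{
	int need_commit = 2;			/* assumed initial retry budget */

	if (!alloc_data_chunk_and_retry(bytes))	/* hypothetical helper */
		return 0;
	while (need_commit-- > 0) {
		if (need_commit > 0)
			wait_ordered_extents();	/* hypothetical helper */
		commit_running_transaction();	/* hypothetical helper */
		if (!retry_reservation(bytes))	/* hypothetical helper */
			return 0;
	}
	return -ENOSPC;
}
#endif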
4042
4043 /*
4044  * Called if we need to clear a data reservation for this inode.
4045  */
4046 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
4047 {
4048         struct btrfs_root *root = BTRFS_I(inode)->root;
4049         struct btrfs_space_info *data_sinfo;
4050
4051         /* make sure bytes are sectorsize aligned */
4052         bytes = ALIGN(bytes, root->sectorsize);
4053
4054         data_sinfo = root->fs_info->data_sinfo;
4055         spin_lock(&data_sinfo->lock);
4056         WARN_ON(data_sinfo->bytes_may_use < bytes);
4057         data_sinfo->bytes_may_use -= bytes;
4058         trace_btrfs_space_reservation(root->fs_info, "space_info",
4059                                       data_sinfo->flags, bytes, 0);
4060         spin_unlock(&data_sinfo->lock);
4061 }
4062
4063 static void force_metadata_allocation(struct btrfs_fs_info *info)
4064 {
4065         struct list_head *head = &info->space_info;
4066         struct btrfs_space_info *found;
4067
4068         rcu_read_lock();
4069         list_for_each_entry_rcu(found, head, list) {
4070                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4071                         found->force_alloc = CHUNK_ALLOC_FORCE;
4072         }
4073         rcu_read_unlock();
4074 }
4075
4076 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4077 {
4078         return (global->size << 1);
4079 }
4080
4081 static int should_alloc_chunk(struct btrfs_root *root,
4082                               struct btrfs_space_info *sinfo, int force)
4083 {
4084         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4085         u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4086         u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4087         u64 thresh;
4088
4089         if (force == CHUNK_ALLOC_FORCE)
4090                 return 1;
4091
4092         /*
4093          * We need to take into account the global rsv because for all intents
4094          * and purposes it's used space.  Don't worry about locking the
4095          * global_rsv, it doesn't change except when the transaction commits.
4096          */
4097         if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4098                 num_allocated += calc_global_rsv_need_space(global_rsv);
4099
4100         /*
4101          * In limited mode, we want to keep some free space available,
4102          * up to about 1% of the FS size.
4103          */
4104         if (force == CHUNK_ALLOC_LIMITED) {
4105                 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4106                 thresh = max_t(u64, 64 * 1024 * 1024,
4107                                div_factor_fine(thresh, 1));
4108
4109                 if (num_bytes - num_allocated < thresh)
4110                         return 1;
4111         }
4112
4113         if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
4114                 return 0;
4115         return 1;
4116 }
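
/*
 * The same thresholds as a standalone sketch, assuming div_factor(x, f)
 * == x * f / 10 and div_factor_fine(x, f) == x * f / 100 as used above.
 * E.g. on a 1 TiB filesystem, CHUNK_ALLOC_LIMITED triggers once free
 * space drops below max(64 MiB, ~10 GiB), while the default mode only
 * allocates once roughly 80% of the writable bytes are used or reserved.
 */
#if 0
static int should_alloc_sketch(u64 writable, u64 allocated, int limited)
{
	u64 thresh;

	if (limited) {
		thresh = max_t(u64, 64ULL * 1024 * 1024, writable / 100);
		if (writable - allocated < thresh)
			return 1;
	}
	/* default mode: allocate once within 2 MiB of the 80% mark */
	return allocated + 2 * 1024 * 1024 >= writable * 8 / 10;
}
#endif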
4117
4118 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4119 {
4120         u64 num_dev;
4121
4122         if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4123                     BTRFS_BLOCK_GROUP_RAID0 |
4124                     BTRFS_BLOCK_GROUP_RAID5 |
4125                     BTRFS_BLOCK_GROUP_RAID6))
4126                 num_dev = root->fs_info->fs_devices->rw_devices;
4127         else if (type & BTRFS_BLOCK_GROUP_RAID1)
4128                 num_dev = 2;
4129         else
4130                 num_dev = 1;    /* DUP or single */
4131
4132         return num_dev;
4133 }
4134
4135 /*
4136  * Reserve space in the system space info, as necessary for updating the
4137  * device items and inserting or deleting the chunk item when allocating
4138  * or removing a chunk of the given @type.
4139  */
4140 void check_system_chunk(struct btrfs_trans_handle *trans,
4141                         struct btrfs_root *root,
4142                         u64 type)
4143 {
4144         struct btrfs_space_info *info;
4145         u64 left;
4146         u64 thresh;
4147         int ret = 0;
4148         u64 num_devs;
4149
4150         /*
4151          * Needed because we can end up allocating a system chunk here and we
4152          * need an atomic, race-free space reservation in the chunk block reserve.
4153          */
4154         ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4155
4156         info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4157         spin_lock(&info->lock);
4158         left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4159                 info->bytes_reserved - info->bytes_readonly -
4160                 info->bytes_may_use;
4161         spin_unlock(&info->lock);
4162
4163         num_devs = get_profile_num_devs(root, type);
4164
4165         /* num_devs device items to update and 1 chunk item to add or remove */
4166         thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4167                 btrfs_calc_trans_metadata_size(root, 1);
4168
4169         if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4170                 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4171                         left, thresh, type);
4172                 dump_space_info(info, 0, 0);
4173         }
4174
4175         if (left < thresh) {
4176                 u64 flags;
4177
4178                 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4179                 /*
4180                  * Ignore failure to create system chunk. We might end up not
4181                  * needing it, as we might not need to COW all nodes/leaves from
4182                  * the paths we visit in the chunk tree (they were already COWed
4183                  * or created in the current transaction for example).
4184                  */
4185                 ret = btrfs_alloc_chunk(trans, root, flags);
4186         }
4187
4188         if (!ret) {
4189                 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4190                                           &root->fs_info->chunk_block_rsv,
4191                                           thresh, BTRFS_RESERVE_NO_FLUSH);
4192                 if (!ret)
4193                         trans->chunk_bytes_reserved += thresh;
4194         }
4195 }
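
/*
 * Worked example, assuming the usual definitions where
 * btrfs_calc_trans_metadata_size(root, n) == nodesize * BTRFS_MAX_LEVEL *
 * 2 * n and btrfs_calc_trunc_metadata_size(root, n) == nodesize *
 * BTRFS_MAX_LEVEL * n: on a 2-device RAID1 filesystem with 16 KiB nodes,
 * allocating a chunk reserves trunc(2) + trans(1) = 256 KiB + 256 KiB =
 * 512 KiB of SYSTEM space, enough to update both device items and insert
 * the chunk item, reserved with BTRFS_RESERVE_NO_FLUSH so this path never
 * blocks on reclaim.
 */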
4196
4197 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4198                           struct btrfs_root *extent_root, u64 flags, int force)
4199 {
4200         struct btrfs_space_info *space_info;
4201         struct btrfs_fs_info *fs_info = extent_root->fs_info;
4202         int wait_for_alloc = 0;
4203         int ret = 0;
4204
4205         /* Don't re-enter if we're already allocating a chunk */
4206         if (trans->allocating_chunk)
4207                 return -ENOSPC;
4208
4209         space_info = __find_space_info(extent_root->fs_info, flags);
4210         if (!space_info) {
4211                 ret = update_space_info(extent_root->fs_info, flags,
4212                                         0, 0, &space_info);
4213                 BUG_ON(ret); /* -ENOMEM */
4214         }
4215         BUG_ON(!space_info); /* Logic error */
4216
4217 again:
4218         spin_lock(&space_info->lock);
4219         if (force < space_info->force_alloc)
4220                 force = space_info->force_alloc;
4221         if (space_info->full) {
4222                 if (should_alloc_chunk(extent_root, space_info, force))
4223                         ret = -ENOSPC;
4224                 else
4225                         ret = 0;
4226                 spin_unlock(&space_info->lock);
4227                 return ret;
4228         }
4229
4230         if (!should_alloc_chunk(extent_root, space_info, force)) {
4231                 spin_unlock(&space_info->lock);
4232                 return 0;
4233         } else if (space_info->chunk_alloc) {
4234                 wait_for_alloc = 1;
4235         } else {
4236                 space_info->chunk_alloc = 1;
4237         }
4238
4239         spin_unlock(&space_info->lock);
4240
4241         mutex_lock(&fs_info->chunk_mutex);
4242
4243         /*
4244          * The chunk_mutex is held throughout the entirety of a chunk
4245          * allocation, so once we've acquired the chunk_mutex we know that the
4246          * other guy is done and we need to recheck and see if we should
4247          * allocate.
4248          */
4249         if (wait_for_alloc) {
4250                 mutex_unlock(&fs_info->chunk_mutex);
4251                 wait_for_alloc = 0;
4252                 goto again;
4253         }
4254
4255         trans->allocating_chunk = true;
4256
4257         /*
4258          * If we have mixed data/metadata chunks we want to make sure we keep
4259          * allocating mixed chunks instead of individual chunks.
4260          */
4261         if (btrfs_mixed_space_info(space_info))
4262                 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4263
4264         /*
4265          * if we're doing a data chunk, go ahead and make sure that
4266          * we keep a reasonable number of metadata chunks allocated in the
4267          * FS as well.
4268          */
4269         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4270                 fs_info->data_chunk_allocations++;
4271                 if (!(fs_info->data_chunk_allocations %
4272                       fs_info->metadata_ratio))
4273                         force_metadata_allocation(fs_info);
4274         }
4275
4276         /*
4277          * Check if we have enough space in SYSTEM chunk because we may need
4278          * to update devices.
4279          */
4280         check_system_chunk(trans, extent_root, flags);
4281
4282         ret = btrfs_alloc_chunk(trans, extent_root, flags);
4283         trans->allocating_chunk = false;
4284
4285         spin_lock(&space_info->lock);
4286         if (ret < 0 && ret != -ENOSPC)
4287                 goto out;
4288         if (ret)
4289                 space_info->full = 1;
4290         else
4291                 ret = 1;
4292
4293         space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4294 out:
4295         space_info->chunk_alloc = 0;
4296         spin_unlock(&space_info->lock);
4297         mutex_unlock(&fs_info->chunk_mutex);
4298         /*
4299          * When we allocate a new chunk we reserve space in the chunk block
4300          * reserve to make sure we can COW nodes/leaves in the chunk tree or
4301          * add new nodes/leaves to it if we end up needing to do it when
4302          * inserting the chunk item and updating device items as part of the
4303          * second phase of chunk allocation, performed by
4304          * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4305          * large number of new block groups to create in our transaction
4306          * handle's new_bgs list to avoid exhausting the chunk block reserve
4307          * in extreme cases - like having a single transaction create many new
4308          * block groups when starting to write out the free space caches of all
4309          * the block groups that were made dirty during the lifetime of the
4310          * transaction.
4311          */
4312         if (trans->can_flush_pending_bgs &&
4313             trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4314                 btrfs_create_pending_block_groups(trans, trans->root);
4315                 btrfs_trans_release_chunk_metadata(trans);
4316         }
4317         return ret;
4318 }
4319
4320 static int can_overcommit(struct btrfs_root *root,
4321                           struct btrfs_space_info *space_info, u64 bytes,
4322                           enum btrfs_reserve_flush_enum flush)
4323 {
4324         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4325         u64 profile = btrfs_get_alloc_profile(root, 0);
4326         u64 space_size;
4327         u64 avail;
4328         u64 used;
4329
4330         used = space_info->bytes_used + space_info->bytes_reserved +
4331                 space_info->bytes_pinned + space_info->bytes_readonly;
4332
4333         /*
4334          * We only want to allow over committing if we have lots of actual space
4335          * free, but if we don't have enough space to handle the global reserve
4336          * space then we could end up having a real enospc problem when trying
4337          * to allocate a chunk or some other such important allocation.
4338          */
4339         spin_lock(&global_rsv->lock);
4340         space_size = calc_global_rsv_need_space(global_rsv);
4341         spin_unlock(&global_rsv->lock);
4342         if (used + space_size >= space_info->total_bytes)
4343                 return 0;
4344
4345         used += space_info->bytes_may_use;
4346
4347         spin_lock(&root->fs_info->free_chunk_lock);
4348         avail = root->fs_info->free_chunk_space;
4349         spin_unlock(&root->fs_info->free_chunk_lock);
4350
4351         /*
4352          * If we have dup, raid1 or raid10 then only half of the free
4353          * space is actually usable.  For raid56, the space info used
4354          * doesn't include the parity drive, so we don't have to
4355          * change the math
4356          */
4357         if (profile & (BTRFS_BLOCK_GROUP_DUP |
4358                        BTRFS_BLOCK_GROUP_RAID1 |
4359                        BTRFS_BLOCK_GROUP_RAID10))
4360                 avail >>= 1;
4361
4362         /*
4363          * If we can flush everything (BTRFS_RESERVE_FLUSH_ALL), be more
4364          * conservative and only let the reserver overcommit up to 1/8 of
4365          * the free space; otherwise allow overcommitting up to 1/2 of it.
4366          */
4367         if (flush == BTRFS_RESERVE_FLUSH_ALL)
4368                 avail >>= 3;
4369         else
4370                 avail >>= 1;
4371
4372         if (used + bytes < space_info->total_bytes + avail)
4373                 return 1;
4374         return 0;
4375 }
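
/*
 * The overcommit allowance as a standalone sketch.  With 10 GiB of
 * unallocated device space and a mirrored profile, a FLUSH_ALL reserver
 * may overcommit by (10 GiB / 2) / 8 = 640 MiB, while a reserver that
 * cannot flush everything is allowed (10 GiB / 2) / 2 = 2.5 GiB.
 */
#if 0
static u64 overcommit_allowance(u64 free_chunk_space, int mirrored,
				int flush_all)
{
	if (mirrored)		/* DUP/RAID1/RAID10: half is usable */
		free_chunk_space >>= 1;
	return flush_all ? free_chunk_space >> 3 : free_chunk_space >> 1;
}
#endif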
4376
4377 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4378                                          unsigned long nr_pages, int nr_items)
4379 {
4380         struct super_block *sb = root->fs_info->sb;
4381
4382         if (down_read_trylock(&sb->s_umount)) {
4383                 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4384                 up_read(&sb->s_umount);
4385         } else {
4386                 /*
4387                  * We needn't worry about the filesystem going from r/w to r/o
4388                  * even though we don't acquire the ->s_umount mutex, because
4389                  * the filesystem should guarantee that the delalloc inode list
4390                  * is empty once the filesystem is read-only (all dirty pages
4391                  * have been written to disk).
4392                  */
4393                 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4394                 if (!current->journal_info)
4395                         btrfs_wait_ordered_roots(root->fs_info, nr_items);
4396         }
4397 }
4398
4399 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4400 {
4401         u64 bytes;
4402         int nr;
4403
4404         bytes = btrfs_calc_trans_metadata_size(root, 1);
4405         nr = (int)div64_u64(to_reclaim, bytes);
4406         if (!nr)
4407                 nr = 1;
4408         return nr;
4409 }
4410
4411 #define EXTENT_SIZE_PER_ITEM    (256 * 1024)
4412
4413 /*
4414  * shrink metadata reservation for delalloc
4415  */
4416 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4417                             bool wait_ordered)
4418 {
4419         struct btrfs_block_rsv *block_rsv;
4420         struct btrfs_space_info *space_info;
4421         struct btrfs_trans_handle *trans;
4422         u64 delalloc_bytes;
4423         u64 max_reclaim;
4424         long time_left;
4425         unsigned long nr_pages;
4426         int loops;
4427         int items;
4428         enum btrfs_reserve_flush_enum flush;
4429
4430         /* Calculate the number of items to flush for this space reservation */
4431         items = calc_reclaim_items_nr(root, to_reclaim);
4432         to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4433
4434         trans = (struct btrfs_trans_handle *)current->journal_info;
4435         block_rsv = &root->fs_info->delalloc_block_rsv;
4436         space_info = block_rsv->space_info;
4437
4438         delalloc_bytes = percpu_counter_sum_positive(
4439                                                 &root->fs_info->delalloc_bytes);
4440         if (delalloc_bytes == 0) {
4441                 if (trans)
4442                         return;
4443                 if (wait_ordered)
4444                         btrfs_wait_ordered_roots(root->fs_info, items);
4445                 return;
4446         }
4447
4448         loops = 0;
4449         while (delalloc_bytes && loops < 3) {
4450                 max_reclaim = min(delalloc_bytes, to_reclaim);
4451                 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4452                 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4453                 /*
4454                  * We need to wait for the async pages to actually start before
4455                  * we do anything.
4456                  */
4457                 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4458                 if (!max_reclaim)
4459                         goto skip_async;
4460
4461                 if (max_reclaim <= nr_pages)
4462                         max_reclaim = 0;
4463                 else
4464                         max_reclaim -= nr_pages;
4465
4466                 wait_event(root->fs_info->async_submit_wait,
4467                            atomic_read(&root->fs_info->async_delalloc_pages) <=
4468                            (int)max_reclaim);
4469 skip_async:
4470                 if (!trans)
4471                         flush = BTRFS_RESERVE_FLUSH_ALL;
4472                 else
4473                         flush = BTRFS_RESERVE_NO_FLUSH;
4474                 spin_lock(&space_info->lock);
4475                 if (can_overcommit(root, space_info, orig, flush)) {
4476                         spin_unlock(&space_info->lock);
4477                         break;
4478                 }
4479                 spin_unlock(&space_info->lock);
4480
4481                 loops++;
4482                 if (wait_ordered && !trans) {
4483                         btrfs_wait_ordered_roots(root->fs_info, items);
4484                 } else {
4485                         time_left = schedule_timeout_killable(1);
4486                         if (time_left)
4487                                 break;
4488                 }
4489                 delalloc_bytes = percpu_counter_sum_positive(
4490                                                 &root->fs_info->delalloc_bytes);
4491         }
4492 }
4493
4494 /**
4495  * may_commit_transaction - possibly commit the transaction if it's OK to
4496  * @root - the root we're allocating for
4497  * @bytes - the number of bytes we want to reserve
4498  * @force - force the commit
4499  *
4500  * This will check to make sure that committing the transaction will actually
4501  * get us somewhere and then commit the transaction if it does.  Otherwise it
4502  * will return -ENOSPC.
4503  */
4504 static int may_commit_transaction(struct btrfs_root *root,
4505                                   struct btrfs_space_info *space_info,
4506                                   u64 bytes, int force)
4507 {
4508         struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4509         struct btrfs_trans_handle *trans;
4510
4511         trans = (struct btrfs_trans_handle *)current->journal_info;
4512         if (trans)
4513                 return -EAGAIN;
4514
4515         if (force)
4516                 goto commit;
4517
4518         /* See if there is enough pinned space to make this reservation */
4519         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4520                                    bytes) >= 0)
4521                 goto commit;
4522
4523         /*
4524          * See if there is some space in the delayed insertion reservation for
4525          * this reservation.
4526          */
4527         if (space_info != delayed_rsv->space_info)
4528                 return -ENOSPC;
4529
4530         spin_lock(&delayed_rsv->lock);
4531         if (percpu_counter_compare(&space_info->total_bytes_pinned,
4532                                    bytes - delayed_rsv->size) < 0) {
4533                 spin_unlock(&delayed_rsv->lock);
4534                 return -ENOSPC;
4535         }
4536         spin_unlock(&delayed_rsv->lock);
4537
4538 commit:
4539         trans = btrfs_join_transaction(root);
4540         if (IS_ERR(trans))
4541                 return -ENOSPC;
4542
4543         return btrfs_commit_transaction(trans, root);
4544 }
4545
4546 enum flush_state {
4547         FLUSH_DELAYED_ITEMS_NR  =       1,
4548         FLUSH_DELAYED_ITEMS     =       2,
4549         FLUSH_DELALLOC          =       3,
4550         FLUSH_DELALLOC_WAIT     =       4,
4551         ALLOC_CHUNK             =       5,
4552         COMMIT_TRANS            =       6,
4553 };
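
/*
 * These states form an escalation ladder: reserve_metadata_bytes() and
 * the async reclaim worker below walk them in ascending order, from the
 * cheapest action (running a bounded number of delayed items) to the most
 * expensive (committing the transaction), retrying the reservation
 * between steps.
 */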
4554
4555 static int flush_space(struct btrfs_root *root,
4556                        struct btrfs_space_info *space_info, u64 num_bytes,
4557                        u64 orig_bytes, int state)
4558 {
4559         struct btrfs_trans_handle *trans;
4560         int nr;
4561         int ret = 0;
4562
4563         switch (state) {
4564         case FLUSH_DELAYED_ITEMS_NR:
4565         case FLUSH_DELAYED_ITEMS:
4566                 if (state == FLUSH_DELAYED_ITEMS_NR)
4567                         nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4568                 else
4569                         nr = -1;
4570
4571                 trans = btrfs_join_transaction(root);
4572                 if (IS_ERR(trans)) {
4573                         ret = PTR_ERR(trans);
4574                         break;
4575                 }
4576                 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4577                 btrfs_end_transaction(trans, root);
4578                 break;
4579         case FLUSH_DELALLOC:
4580         case FLUSH_DELALLOC_WAIT:
4581                 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4582                                 state == FLUSH_DELALLOC_WAIT);
4583                 break;
4584         case ALLOC_CHUNK:
4585                 trans = btrfs_join_transaction(root);
4586                 if (IS_ERR(trans)) {
4587                         ret = PTR_ERR(trans);
4588                         break;
4589                 }
4590                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4591                                      btrfs_get_alloc_profile(root, 0),
4592                                      CHUNK_ALLOC_NO_FORCE);
4593                 btrfs_end_transaction(trans, root);
4594                 if (ret == -ENOSPC)
4595                         ret = 0;
4596                 break;
4597         case COMMIT_TRANS:
4598                 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4599                 break;
4600         default:
4601                 ret = -ENOSPC;
4602                 break;
4603         }
4604
4605         return ret;
4606 }
4607
4608 static inline u64
4609 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4610                                  struct btrfs_space_info *space_info)
4611 {
4612         u64 used;
4613         u64 expected;
4614         u64 to_reclaim;
4615
4616         to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4617                                 16 * 1024 * 1024);
4618         spin_lock(&space_info->lock);
4619         if (can_overcommit(root, space_info, to_reclaim,
4620                            BTRFS_RESERVE_FLUSH_ALL)) {
4621                 to_reclaim = 0;
4622                 goto out;
4623         }
4624
4625         used = space_info->bytes_used + space_info->bytes_reserved +
4626                space_info->bytes_pinned + space_info->bytes_readonly +
4627                space_info->bytes_may_use;
4628         if (can_overcommit(root, space_info, 1024 * 1024,
4629                            BTRFS_RESERVE_FLUSH_ALL))
4630                 expected = div_factor_fine(space_info->total_bytes, 95);
4631         else
4632                 expected = div_factor_fine(space_info->total_bytes, 90);
4633
4634         if (used > expected)
4635                 to_reclaim = used - expected;
4636         else
4637                 to_reclaim = 0;
4638         to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4639                                      space_info->bytes_reserved);
4640 out:
4641         spin_unlock(&space_info->lock);
4642
4643         return to_reclaim;
4644 }
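
/*
 * Worked example: on an 8-CPU machine the initial reclaim target above is
 * min(8 MiB, 16 MiB) = 8 MiB.  If even that cannot be overcommitted, the
 * target becomes whatever usage exceeds 95% of total_bytes (90% if not
 * even a 1 MiB overcommit is possible), clamped to what is actually
 * reclaimable, i.e. bytes_may_use + bytes_reserved.
 */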
4645
4646 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4647                                         struct btrfs_fs_info *fs_info, u64 used)
4648 {
4649         u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4650
4651         /* If we're just plain full then async reclaim just slows us down. */
4652         if (space_info->bytes_used >= thresh)
4653                 return 0;
4654
4655         return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4656                 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4657 }
4658
4659 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4660                                        struct btrfs_fs_info *fs_info,
4661                                        int flush_state)
4662 {
4663         u64 used;
4664
4665         spin_lock(&space_info->lock);
4666         /*
4667          * We've run out of space and flush_space didn't get us any free
4668          * space back, so don't bother doing async reclaim.
4669          */
4670         if (flush_state > COMMIT_TRANS && space_info->full) {
4671                 spin_unlock(&space_info->lock);
4672                 return 0;
4673         }
4674
4675         used = space_info->bytes_used + space_info->bytes_reserved +
4676                space_info->bytes_pinned + space_info->bytes_readonly +
4677                space_info->bytes_may_use;
4678         if (need_do_async_reclaim(space_info, fs_info, used)) {
4679                 spin_unlock(&space_info->lock);
4680                 return 1;
4681         }
4682         spin_unlock(&space_info->lock);
4683
4684         return 0;
4685 }
4686
4687 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4688 {
4689         struct btrfs_fs_info *fs_info;
4690         struct btrfs_space_info *space_info;
4691         u64 to_reclaim;
4692         int flush_state;
4693
4694         fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4695         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4696
4697         to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4698                                                       space_info);
4699         if (!to_reclaim)
4700                 return;
4701
4702         flush_state = FLUSH_DELAYED_ITEMS_NR;
4703         do {
4704                 flush_space(fs_info->fs_root, space_info, to_reclaim,
4705                             to_reclaim, flush_state);
4706                 flush_state++;
4707                 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4708                                                  flush_state))
4709                         return;
4710         } while (flush_state < COMMIT_TRANS);
4711 }
4712
4713 void btrfs_init_async_reclaim_work(struct work_struct *work)
4714 {
4715         INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4716 }
4717
4718 /**
4719  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4720  * @root - the root we're allocating for
4721  * @block_rsv - the block_rsv we're allocating for
4722  * @orig_bytes - the number of bytes we want
4723  * @flush - whether or not we can flush to make our reservation
4724  *
4725  * This will reserve orig_bytes number of bytes from the space info associated
4726  * with the block_rsv.  If there is not enough space it will make an attempt to
4727  * flush out space to make room.  It will do this by flushing delalloc if
4728  * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
4729  * then no attempts to regain reservations will be made and this will fail if
4730  * there is not enough space already.
4731  */
4732 static int reserve_metadata_bytes(struct btrfs_root *root,
4733                                   struct btrfs_block_rsv *block_rsv,
4734                                   u64 orig_bytes,
4735                                   enum btrfs_reserve_flush_enum flush)
4736 {
4737         struct btrfs_space_info *space_info = block_rsv->space_info;
4738         u64 used;
4739         u64 num_bytes = orig_bytes;
4740         int flush_state = FLUSH_DELAYED_ITEMS_NR;
4741         int ret = 0;
4742         bool flushing = false;
4743
4744 again:
4745         ret = 0;
4746         spin_lock(&space_info->lock);
4747         /*
4748          * We only want to wait if somebody other than us is flushing and we
4749          * are actually allowed to flush all things.
4750          */
4751         while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4752                space_info->flush) {
4753                 spin_unlock(&space_info->lock);
4754                 /*
4755                  * If we have a trans handle we can't wait because the flusher
4756                  * may have to commit the transaction, which would mean we would
4757                  * deadlock since we are waiting for the flusher to finish, but
4758                  * hold the current transaction open.
4759                  */
4760                 if (current->journal_info)
4761                         return -EAGAIN;
4762                 ret = wait_event_killable(space_info->wait, !space_info->flush);
4763                 /* Must have been killed, return */
4764                 if (ret)
4765                         return -EINTR;
4766
4767                 spin_lock(&space_info->lock);
4768         }
4769
4770         ret = -ENOSPC;
4771         used = space_info->bytes_used + space_info->bytes_reserved +
4772                 space_info->bytes_pinned + space_info->bytes_readonly +
4773                 space_info->bytes_may_use;
4774
4775         /*
4776          * The idea here is that if we've not already over-reserved the space
4777          * info then we can go ahead and save our reservation first and then
4778          * start flushing if we need to.  Otherwise, if we've already
4779          * overcommitted, let's start flushing stuff first and then come back
4780          * and try to make our reservation.
4781          */
4782         if (used <= space_info->total_bytes) {
4783                 if (used + orig_bytes <= space_info->total_bytes) {
4784                         space_info->bytes_may_use += orig_bytes;
4785                         trace_btrfs_space_reservation(root->fs_info,
4786                                 "space_info", space_info->flags, orig_bytes, 1);
4787                         ret = 0;
4788                 } else {
4789                         /*
4790                          * OK, set num_bytes to orig_bytes since we aren't
4791                          * overcommitted; this way we only try to reclaim what
4792                          * we need.
4793                          */
4794                         num_bytes = orig_bytes;
4795                 }
4796         } else {
4797                 /*
4798                  * OK, we're overcommitted; set num_bytes to the overcommitted
4799                  * amount plus the amount of bytes that we need for this
4800                  * reservation.
4801                  */
4802                 num_bytes = used - space_info->total_bytes +
4803                         (orig_bytes * 2);
4804         }
4805
4806         if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4807                 space_info->bytes_may_use += orig_bytes;
4808                 trace_btrfs_space_reservation(root->fs_info, "space_info",
4809                                               space_info->flags, orig_bytes,
4810                                               1);
4811                 ret = 0;
4812         }
4813
4814         /*
4815          * Couldn't make our reservation, save our place so while we're trying
4816          * to reclaim space we can actually use it instead of somebody else
4817          * stealing it from us.
4818          *
4819          * We make the other tasks wait for the flush only when we can flush
4820          * all things.
4821          */
4822         if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4823                 flushing = true;
4824                 space_info->flush = 1;
4825         } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4826                 used += orig_bytes;
4827                 /*
4828                  * We will do the space reservation dance during log replay,
4829                  * which means we won't have fs_info->fs_root set, so don't do
4830                  * the async reclaim as we will panic.
4831                  */
4832                 if (!root->fs_info->log_root_recovering &&
4833                     need_do_async_reclaim(space_info, root->fs_info, used) &&
4834                     !work_busy(&root->fs_info->async_reclaim_work))
4835                         queue_work(system_unbound_wq,
4836                                    &root->fs_info->async_reclaim_work);
4837         }
4838         spin_unlock(&space_info->lock);
4839
4840         if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4841                 goto out;
4842
4843         ret = flush_space(root, space_info, num_bytes, orig_bytes,
4844                           flush_state);
4845         flush_state++;
4846
4847         /*
4848          * If we are FLUSH_LIMIT we cannot flush delalloc, or a deadlock
4849          * could happen, so skip the delalloc flush states.
4850          */
4851         if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4852             (flush_state == FLUSH_DELALLOC ||
4853              flush_state == FLUSH_DELALLOC_WAIT))
4854                 flush_state = ALLOC_CHUNK;
4855
4856         if (!ret)
4857                 goto again;
4858         else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
4859                  flush_state < COMMIT_TRANS)
4860                 goto again;
4861         else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
4862                  flush_state <= COMMIT_TRANS)
4863                 goto again;
4864
4865 out:
4866         if (ret == -ENOSPC &&
4867             unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
4868                 struct btrfs_block_rsv *global_rsv =
4869                         &root->fs_info->global_block_rsv;
4870
4871                 if (block_rsv != global_rsv &&
4872                     !block_rsv_use_bytes(global_rsv, orig_bytes))
4873                         ret = 0;
4874         }
4875         if (ret == -ENOSPC)
4876                 trace_btrfs_space_reservation(root->fs_info,
4877                                               "space_info:enospc",
4878                                               space_info->flags, orig_bytes, 1);
4879         if (flushing) {
4880                 spin_lock(&space_info->lock);
4881                 space_info->flush = 0;
4882                 wake_up_all(&space_info->wait);
4883                 spin_unlock(&space_info->lock);
4884         }
4885         return ret;
4886 }
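
/*
 * Note on choosing the flush mode: callers already holding a transaction
 * handle typically pass BTRFS_RESERVE_NO_FLUSH (flushing could try to
 * commit the very transaction they hold open), paths where writing out
 * delalloc could deadlock use BTRFS_RESERVE_FLUSH_LIMIT (see the
 * FLUSH_DELALLOC skip above), and everything else may use
 * BTRFS_RESERVE_FLUSH_ALL.
 */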
4887
4888 static struct btrfs_block_rsv *get_block_rsv(
4889                                         const struct btrfs_trans_handle *trans,
4890                                         const struct btrfs_root *root)
4891 {
4892         struct btrfs_block_rsv *block_rsv = NULL;
4893
4894         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4895                 block_rsv = trans->block_rsv;
4896
4897         if (root == root->fs_info->csum_root && trans->adding_csums)
4898                 block_rsv = trans->block_rsv;
4899
4900         if (root == root->fs_info->uuid_root)
4901                 block_rsv = trans->block_rsv;
4902
4903         if (!block_rsv)
4904                 block_rsv = root->block_rsv;
4905
4906         if (!block_rsv)
4907                 block_rsv = &root->fs_info->empty_block_rsv;
4908
4909         return block_rsv;
4910 }
4911
4912 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
4913                                u64 num_bytes)
4914 {
4915         int ret = -ENOSPC;
4916         spin_lock(&block_rsv->lock);
4917         if (block_rsv->reserved >= num_bytes) {
4918                 block_rsv->reserved -= num_bytes;
4919                 if (block_rsv->reserved < block_rsv->size)
4920                         block_rsv->full = 0;
4921                 ret = 0;
4922         }
4923         spin_unlock(&block_rsv->lock);
4924         return ret;
4925 }
4926
4927 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
4928                                 u64 num_bytes, int update_size)
4929 {
4930         spin_lock(&block_rsv->lock);
4931         block_rsv->reserved += num_bytes;
4932         if (update_size)
4933                 block_rsv->size += num_bytes;
4934         else if (block_rsv->reserved >= block_rsv->size)
4935                 block_rsv->full = 1;
4936         spin_unlock(&block_rsv->lock);
4937 }
4938
4939 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
4940                              struct btrfs_block_rsv *dest, u64 num_bytes,
4941                              int min_factor)
4942 {
4943         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
4944         u64 min_bytes;
4945
4946         if (global_rsv->space_info != dest->space_info)
4947                 return -ENOSPC;
4948
4949         spin_lock(&global_rsv->lock);
4950         min_bytes = div_factor(global_rsv->size, min_factor);
4951         if (global_rsv->reserved < min_bytes + num_bytes) {
4952                 spin_unlock(&global_rsv->lock);
4953                 return -ENOSPC;
4954         }
4955         global_rsv->reserved -= num_bytes;
4956         if (global_rsv->reserved < global_rsv->size)
4957                 global_rsv->full = 0;
4958         spin_unlock(&global_rsv->lock);
4959
4960         block_rsv_add_bytes(dest, num_bytes, 1);
4961         return 0;
4962 }
4963
4964 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
4965                                     struct btrfs_block_rsv *block_rsv,
4966                                     struct btrfs_block_rsv *dest, u64 num_bytes)
4967 {
4968         struct btrfs_space_info *space_info = block_rsv->space_info;
4969
4970         spin_lock(&block_rsv->lock);
4971         if (num_bytes == (u64)-1)
4972                 num_bytes = block_rsv->size;
4973         block_rsv->size -= num_bytes;
4974         if (block_rsv->reserved >= block_rsv->size) {
4975                 num_bytes = block_rsv->reserved - block_rsv->size;
4976                 block_rsv->reserved = block_rsv->size;
4977                 block_rsv->full = 1;
4978         } else {
4979                 num_bytes = 0;
4980         }
4981         spin_unlock(&block_rsv->lock);
4982
4983         if (num_bytes > 0) {
4984                 if (dest) {
4985                         spin_lock(&dest->lock);
4986                         if (!dest->full) {
4987                                 u64 bytes_to_add;
4988
4989                                 bytes_to_add = dest->size - dest->reserved;
4990                                 bytes_to_add = min(num_bytes, bytes_to_add);
4991                                 dest->reserved += bytes_to_add;
4992                                 if (dest->reserved >= dest->size)
4993                                         dest->full = 1;
4994                                 num_bytes -= bytes_to_add;
4995                         }
4996                         spin_unlock(&dest->lock);
4997                 }
4998                 if (num_bytes) {
4999                         spin_lock(&space_info->lock);
5000                         space_info->bytes_may_use -= num_bytes;
5001                         trace_btrfs_space_reservation(fs_info, "space_info",
5002                                         space_info->flags, num_bytes, 0);
5003                         spin_unlock(&space_info->lock);
5004                 }
5005         }
5006 }
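
/*
 * Worked example of the two-step refill above: take a rsv with size ==
 * reserved == 4 MiB and release 1 MiB.  The size shrinks to 3 MiB,
 * leaving 1 MiB of excess reservation; if @dest (usually the global
 * reserve) is 256 KiB short of full it absorbs 256 KiB, and the remaining
 * 768 KiB is returned to the space info by decrementing bytes_may_use.
 */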
5007
5008 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
5009                                    struct btrfs_block_rsv *dst, u64 num_bytes)
5010 {
5011         int ret;
5012
5013         ret = block_rsv_use_bytes(src, num_bytes);
5014         if (ret)
5015                 return ret;
5016
5017         block_rsv_add_bytes(dst, num_bytes, 1);
5018         return 0;
5019 }
5020
5021 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5022 {
5023         memset(rsv, 0, sizeof(*rsv));
5024         spin_lock_init(&rsv->lock);
5025         rsv->type = type;
5026 }
5027
5028 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5029                                               unsigned short type)
5030 {
5031         struct btrfs_block_rsv *block_rsv;
5032         struct btrfs_fs_info *fs_info = root->fs_info;
5033
5034         block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5035         if (!block_rsv)
5036                 return NULL;
5037
5038         btrfs_init_block_rsv(block_rsv, type);
5039         block_rsv->space_info = __find_space_info(fs_info,
5040                                                   BTRFS_BLOCK_GROUP_METADATA);
5041         return block_rsv;
5042 }
5043
5044 void btrfs_free_block_rsv(struct btrfs_root *root,
5045                           struct btrfs_block_rsv *rsv)
5046 {
5047         if (!rsv)
5048                 return;
5049         btrfs_block_rsv_release(root, rsv, (u64)-1);
5050         kfree(rsv);
5051 }
5052
5053 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5054 {
5055         kfree(rsv);
5056 }
5057
5058 int btrfs_block_rsv_add(struct btrfs_root *root,
5059                         struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5060                         enum btrfs_reserve_flush_enum flush)
5061 {
5062         int ret;
5063
5064         if (num_bytes == 0)
5065                 return 0;
5066
5067         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5068         if (!ret) {
5069                 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5070                 return 0;
5071         }
5072
5073         return ret;
5074 }
5075
5076 int btrfs_block_rsv_check(struct btrfs_root *root,
5077                           struct btrfs_block_rsv *block_rsv, int min_factor)
5078 {
5079         u64 num_bytes = 0;
5080         int ret = -ENOSPC;
5081
5082         if (!block_rsv)
5083                 return 0;
5084
5085         spin_lock(&block_rsv->lock);
5086         num_bytes = div_factor(block_rsv->size, min_factor);
5087         if (block_rsv->reserved >= num_bytes)
5088                 ret = 0;
5089         spin_unlock(&block_rsv->lock);
5090
5091         return ret;
5092 }
5093
5094 int btrfs_block_rsv_refill(struct btrfs_root *root,
5095                            struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5096                            enum btrfs_reserve_flush_enum flush)
5097 {
5098         u64 num_bytes = 0;
5099         int ret = -ENOSPC;
5100
5101         if (!block_rsv)
5102                 return 0;
5103
5104         spin_lock(&block_rsv->lock);
5105         num_bytes = min_reserved;
5106         if (block_rsv->reserved >= num_bytes)
5107                 ret = 0;
5108         else
5109                 num_bytes -= block_rsv->reserved;
5110         spin_unlock(&block_rsv->lock);
5111
5112         if (!ret)
5113                 return 0;
5114
5115         ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5116         if (!ret) {
5117                 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5118                 return 0;
5119         }
5120
5121         return ret;
5122 }
5123
5124 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5125                             struct btrfs_block_rsv *dst_rsv,
5126                             u64 num_bytes)
5127 {
5128         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5129 }
5130
5131 void btrfs_block_rsv_release(struct btrfs_root *root,
5132                              struct btrfs_block_rsv *block_rsv,
5133                              u64 num_bytes)
5134 {
5135         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5136         if (global_rsv == block_rsv ||
5137             block_rsv->space_info != global_rsv->space_info)
5138                 global_rsv = NULL;
5139         block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5140                                 num_bytes);
5141 }
5142
5143 /*
5144  * Helper to calculate the size of the global block reservation.
5145  * The desired value is the sum of the space used by the extent tree,
5146  * the checksum tree and the root tree.
5147  */
5148 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5149 {
5150         struct btrfs_space_info *sinfo;
5151         u64 num_bytes;
5152         u64 meta_used;
5153         u64 data_used;
5154         int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5155
5156         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5157         spin_lock(&sinfo->lock);
5158         data_used = sinfo->bytes_used;
5159         spin_unlock(&sinfo->lock);
5160
5161         sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5162         spin_lock(&sinfo->lock);
5163         if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5164                 data_used = 0;
5165         meta_used = sinfo->bytes_used;
5166         spin_unlock(&sinfo->lock);
5167
5168         num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5169                     csum_size * 2;
5170         num_bytes += div_u64(data_used + meta_used, 50);
5171
5172         if (num_bytes * 3 > meta_used)
5173                 num_bytes = div_u64(meta_used, 3);
5174
5175         return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
5176 }
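
/*
 * Worked example, assuming crc32c checksums (csum_size == 4), 4 KiB
 * blocks and 16 KiB nodes: with 100 GiB of data and 4 GiB of metadata in
 * use, the csum term is (100 GiB / 4 KiB) * 4 * 2 = 200 MiB, plus
 * (100 GiB + 4 GiB) / 50 ~= 2.1 GiB, about 2.3 GiB in total.  That is
 * more than a third of the metadata in use, so it is capped at
 * 4 GiB / 3 ~= 1.33 GiB and rounded up to the 16 MiB (nodesize << 10)
 * granularity.  update_global_block_rsv() below additionally caps the
 * reservation size at 512 MiB.
 */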
5177
5178 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5179 {
5180         struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5181         struct btrfs_space_info *sinfo = block_rsv->space_info;
5182         u64 num_bytes;
5183
5184         num_bytes = calc_global_metadata_size(fs_info);
5185
5186         spin_lock(&sinfo->lock);
5187         spin_lock(&block_rsv->lock);
5188
5189         block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
5190
5191         num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5192                     sinfo->bytes_reserved + sinfo->bytes_readonly +
5193                     sinfo->bytes_may_use;
5194
5195         if (sinfo->total_bytes > num_bytes) {
5196                 num_bytes = sinfo->total_bytes - num_bytes;
5197                 block_rsv->reserved += num_bytes;
5198                 sinfo->bytes_may_use += num_bytes;
5199                 trace_btrfs_space_reservation(fs_info, "space_info",
5200                                       sinfo->flags, num_bytes, 1);
5201         }
5202
5203         if (block_rsv->reserved >= block_rsv->size) {
5204                 num_bytes = block_rsv->reserved - block_rsv->size;
5205                 sinfo->bytes_may_use -= num_bytes;
5206                 trace_btrfs_space_reservation(fs_info, "space_info",
5207                                       sinfo->flags, num_bytes, 0);
5208                 block_rsv->reserved = block_rsv->size;
5209                 block_rsv->full = 1;
5210         }
5211
5212         spin_unlock(&block_rsv->lock);
5213         spin_unlock(&sinfo->lock);
5214 }
5215
5216 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5217 {
5218         struct btrfs_space_info *space_info;
5219
5220         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5221         fs_info->chunk_block_rsv.space_info = space_info;
5222
5223         space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5224         fs_info->global_block_rsv.space_info = space_info;
5225         fs_info->delalloc_block_rsv.space_info = space_info;
5226         fs_info->trans_block_rsv.space_info = space_info;
5227         fs_info->empty_block_rsv.space_info = space_info;
5228         fs_info->delayed_block_rsv.space_info = space_info;
5229
5230         fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5231         fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5232         fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5233         fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5234         if (fs_info->quota_root)
5235                 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5236         fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5237
5238         update_global_block_rsv(fs_info);
5239 }
5240
5241 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5242 {
5243         block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5244                                 (u64)-1);
5245         WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5246         WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5247         WARN_ON(fs_info->trans_block_rsv.size > 0);
5248         WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5249         WARN_ON(fs_info->chunk_block_rsv.size > 0);
5250         WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5251         WARN_ON(fs_info->delayed_block_rsv.size > 0);
5252         WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5253 }
5254
5255 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5256                                   struct btrfs_root *root)
5257 {
5258         if (!trans->block_rsv)
5259                 return;
5260
5261         if (!trans->bytes_reserved)
5262                 return;
5263
5264         trace_btrfs_space_reservation(root->fs_info, "transaction",
5265                                       trans->transid, trans->bytes_reserved, 0);
5266         btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5267         trans->bytes_reserved = 0;
5268 }
5269
5270 /*
5271  * To be called after all the new block groups attached to the transaction
5272  * handle have been created (btrfs_create_pending_block_groups()).
5273  */
5274 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5275 {
5276         struct btrfs_fs_info *fs_info = trans->root->fs_info;
5277
5278         if (!trans->chunk_bytes_reserved)
5279                 return;
5280
5281         WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5282
5283         block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5284                                 trans->chunk_bytes_reserved);
5285         trans->chunk_bytes_reserved = 0;
5286 }
5287
5288 /* Can only return 0 or -ENOSPC */
5289 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5290                                   struct inode *inode)
5291 {
5292         struct btrfs_root *root = BTRFS_I(inode)->root;
5293         struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5294         struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5295
5296         /*
5297          * We need to hold space in order to delete our orphan item once we've
5298          * added it, so this takes the reservation so we can release it later
5299          * when we are truly done with the orphan item.
5300          */
5301         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5302         trace_btrfs_space_reservation(root->fs_info, "orphan",
5303                                       btrfs_ino(inode), num_bytes, 1);
5304         return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5305 }
5306
5307 void btrfs_orphan_release_metadata(struct inode *inode)
5308 {
5309         struct btrfs_root *root = BTRFS_I(inode)->root;
5310         u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5311         trace_btrfs_space_reservation(root->fs_info, "orphan",
5312                                       btrfs_ino(inode), num_bytes, 0);
5313         btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5314 }
5315
5316 /*
5317  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5318  * root: the root of the parent directory
5319  * rsv: block reservation
5320  * items: the number of items that we need to reserve
5321  * qgroup_reserved: used to return the reserved size in qgroup
5322  *
5323  * This function is used to reserve the space for snapshot/subvolume
5324  * creation and deletion.  Those operations differ from the common
5325  * file/directory operations: they change two fs/file trees
5326  * and the root tree, and the number of items that the qgroup reserves
5327  * differs from the free space reservation, so we can not use
5328  * the space reservation mechanism in start_transaction().
5329  */
5330 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5331                                      struct btrfs_block_rsv *rsv,
5332                                      int items,
5333                                      u64 *qgroup_reserved,
5334                                      bool use_global_rsv)
5335 {
5336         u64 num_bytes;
5337         int ret;
5338         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5339
5340         if (root->fs_info->quota_enabled) {
5341                 /* One for parent inode, two for dir entries */
5342                 num_bytes = 3 * root->nodesize;
5343                 ret = btrfs_qgroup_reserve(root, num_bytes);
5344                 if (ret)
5345                         return ret;
5346         } else {
5347                 num_bytes = 0;
5348         }
5349
5350         *qgroup_reserved = num_bytes;
5351
5352         num_bytes = btrfs_calc_trans_metadata_size(root, items);
5353         rsv->space_info = __find_space_info(root->fs_info,
5354                                             BTRFS_BLOCK_GROUP_METADATA);
5355         ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5356                                   BTRFS_RESERVE_FLUSH_ALL);
5357
5358         if (ret == -ENOSPC && use_global_rsv)
5359                 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5360
5361         if (ret) {
5362                 if (*qgroup_reserved)
5363                         btrfs_qgroup_free(root, *qgroup_reserved);
5364         }
5365
5366         return ret;
5367 }
5368
5369 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5370                                       struct btrfs_block_rsv *rsv,
5371                                       u64 qgroup_reserved)
5372 {
5373         btrfs_block_rsv_release(root, rsv, (u64)-1);
5374 }
5375
5376 /**
5377  * drop_outstanding_extent - drop an outstanding extent
5378  * @inode: the inode we're dropping the extent for
5379  * @num_bytes: the number of bytes we're releasing.
5380  *
5381  * This is called when we are freeing up an outstanding extent, either called
5382  * after an error or after an extent is written.  This will return the number of
5383  * reserved extents that need to be freed.  This must be called with
5384  * BTRFS_I(inode)->lock held.
5385  */
5386 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5387 {
5388         unsigned drop_inode_space = 0;
5389         unsigned dropped_extents = 0;
5390         unsigned num_extents = 0;
5391
5392         num_extents = (unsigned)div64_u64(num_bytes +
5393                                           BTRFS_MAX_EXTENT_SIZE - 1,
5394                                           BTRFS_MAX_EXTENT_SIZE);
5395         ASSERT(num_extents);
5396         ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5397         BTRFS_I(inode)->outstanding_extents -= num_extents;
5398
5399         if (BTRFS_I(inode)->outstanding_extents == 0 &&
5400             test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5401                                &BTRFS_I(inode)->runtime_flags))
5402                 drop_inode_space = 1;
5403
5404         /*
5405          * If we have at least as many outstanding extents as we have
5406          * reserved then we need to leave the reserved extents count alone.
5407          */
5408         if (BTRFS_I(inode)->outstanding_extents >=
5409             BTRFS_I(inode)->reserved_extents)
5410                 return drop_inode_space;
5411
5412         dropped_extents = BTRFS_I(inode)->reserved_extents -
5413                 BTRFS_I(inode)->outstanding_extents;
5414         BTRFS_I(inode)->reserved_extents -= dropped_extents;
5415         return dropped_extents + drop_inode_space;
5416 }
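
/*
 * Worked example for the ceiling division above, assuming
 * BTRFS_MAX_EXTENT_SIZE is 128MiB (its value at the time of writing):
 * dropping 256MiB + 1 byte of delalloc gives
 * (268435457 + 134217727) / 134217728 = 3 outstanding extents, because
 * the trailing byte spills into a third max-sized extent.
 */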
5417
5418 /**
5419  * calc_csum_metadata_size - return the amount of metadata space that must
5420  *      be reserved/freed for the given bytes.
5421  * @inode: the inode we're manipulating
5422  * @num_bytes: the number of bytes in question
5423  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5424  *
5425  * This adjusts the number of csum_bytes in the inode and then returns the
5426  * correct amount of metadata that must either be reserved or freed.  We
5427  * calculate how many checksums we can fit into one leaf and then divide the
5428  * number of bytes that will need to be checksummed by this value to figure out
5429  * how many checksums will be required.  If we are adding bytes then the number
5430  * may go up and we will return the number of additional bytes that must be
5431  * reserved.  If it is going down we will return the number of bytes that must
5432  * be freed.
5433  *
5434  * This must be called with BTRFS_I(inode)->lock held.
5435  */
5436 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5437                                    int reserve)
5438 {
5439         struct btrfs_root *root = BTRFS_I(inode)->root;
5440         u64 old_csums, num_csums;
5441
5442         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5443             BTRFS_I(inode)->csum_bytes == 0)
5444                 return 0;
5445
5446         old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5447         if (reserve)
5448                 BTRFS_I(inode)->csum_bytes += num_bytes;
5449         else
5450                 BTRFS_I(inode)->csum_bytes -= num_bytes;
5451         num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5452
5453         /* No change, no need to reserve more */
5454         if (old_csums == num_csums)
5455                 return 0;
5456
5457         if (reserve)
5458                 return btrfs_calc_trans_metadata_size(root,
5459                                                       num_csums - old_csums);
5460
5461         return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5462 }
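
/*
 * Hedged worked example: if btrfs_csum_bytes_to_leaves() says the old
 * csum_bytes fit in 2 leaves and the new total needs 4, a reserve
 * returns btrfs_calc_trans_metadata_size(root, 4 - 2), i.e. the cost
 * of cowing two more leaves; the concrete byte value depends on the
 * nodesize, so it is deliberately not a constant here.
 */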
5463
5464 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5465 {
5466         struct btrfs_root *root = BTRFS_I(inode)->root;
5467         struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5468         u64 to_reserve = 0;
5469         u64 csum_bytes;
5470         unsigned nr_extents = 0;
5471         int extra_reserve = 0;
5472         enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5473         int ret = 0;
5474         bool delalloc_lock = true;
5475         u64 to_free = 0;
5476         unsigned dropped;
5477
5478         /* If we are a free space inode we must not flush, since we will be
5479          * in the middle of a transaction commit.  We also don't need the
5480          * delalloc mutex since we won't race with anybody.  We need this
5481          * mostly to make lockdep shut its filthy mouth.
5482          */
5483         if (btrfs_is_free_space_inode(inode)) {
5484                 flush = BTRFS_RESERVE_NO_FLUSH;
5485                 delalloc_lock = false;
5486         }
5487
5488         if (flush != BTRFS_RESERVE_NO_FLUSH &&
5489             btrfs_transaction_in_commit(root->fs_info))
5490                 schedule_timeout(1);
5491
5492         if (delalloc_lock)
5493                 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5494
5495         num_bytes = ALIGN(num_bytes, root->sectorsize);
5496
5497         spin_lock(&BTRFS_I(inode)->lock);
5498         nr_extents = (unsigned)div64_u64(num_bytes +
5499                                          BTRFS_MAX_EXTENT_SIZE - 1,
5500                                          BTRFS_MAX_EXTENT_SIZE);
5501         BTRFS_I(inode)->outstanding_extents += nr_extents;
5502         nr_extents = 0;
5503
5504         if (BTRFS_I(inode)->outstanding_extents >
5505             BTRFS_I(inode)->reserved_extents)
5506                 nr_extents = BTRFS_I(inode)->outstanding_extents -
5507                         BTRFS_I(inode)->reserved_extents;
5508
5509         /*
5510          * Add an item to reserve for updating the inode when we complete the
5511          * delalloc io.
5512          */
5513         if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5514                       &BTRFS_I(inode)->runtime_flags)) {
5515                 nr_extents++;
5516                 extra_reserve = 1;
5517         }
5518
5519         to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5520         to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5521         csum_bytes = BTRFS_I(inode)->csum_bytes;
5522         spin_unlock(&BTRFS_I(inode)->lock);
5523
5524         if (root->fs_info->quota_enabled) {
5525                 ret = btrfs_qgroup_reserve(root, nr_extents * root->nodesize);
5526                 if (ret)
5527                         goto out_fail;
5528         }
5529
5530         ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5531         if (unlikely(ret)) {
5532                 if (root->fs_info->quota_enabled)
5533                         btrfs_qgroup_free(root, nr_extents * root->nodesize);
5534                 goto out_fail;
5535         }
5536
5537         spin_lock(&BTRFS_I(inode)->lock);
5538         if (extra_reserve) {
5539                 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5540                         &BTRFS_I(inode)->runtime_flags);
5541                 nr_extents--;
5542         }
5543         BTRFS_I(inode)->reserved_extents += nr_extents;
5544         spin_unlock(&BTRFS_I(inode)->lock);
5545
5546         if (delalloc_lock)
5547                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5548
5549         if (to_reserve)
5550                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5551                                               btrfs_ino(inode), to_reserve, 1);
5552         block_rsv_add_bytes(block_rsv, to_reserve, 1);
5553
5554         return 0;
5555
5556 out_fail:
5557         spin_lock(&BTRFS_I(inode)->lock);
5558         dropped = drop_outstanding_extent(inode, num_bytes);
5559         /*
5560          * If the inode's csum_bytes is the same as the original
5561          * csum_bytes then we know we haven't raced with any freers,
5562          * so we can just reduce the inode's csum bytes and carry on.
5563          */
5564         if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5565                 calc_csum_metadata_size(inode, num_bytes, 0);
5566         } else {
5567                 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5568                 u64 bytes;
5569
5570                 /*
5571                  * This is tricky, but first we need to figure out how much
5572                  * was freed by any freers that ran during this
5573                  * reservation, so we reset ->csum_bytes to the csum_bytes
5574                  * before we dropped our lock, and then call the free for the
5575                  * number of bytes that were freed while we were trying our
5576                  * reservation.
5577                  */
5578                 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5579                 BTRFS_I(inode)->csum_bytes = csum_bytes;
5580                 to_free = calc_csum_metadata_size(inode, bytes, 0);
5581
5583                 /*
5584                  * Now we need to see how much we would have freed had we not
5585                  * been making this reservation and our ->csum_bytes were not
5586                  * artificially inflated.
5587                  */
5588                 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5589                 bytes = csum_bytes - orig_csum_bytes;
5590                 bytes = calc_csum_metadata_size(inode, bytes, 0);
5591
5592                 /*
5593                  * Now reset ->csum_bytes to what it should be.  If bytes is
5594                  * more than to_free then we would have freed more space had we
5595                  * not had an artificially high ->csum_bytes, so we need to free
5596                  * the remainder.  If bytes is the same or less then we don't
5597                  * need to do anything, the other free-ers did the correct
5598                  * thing.
5599                  */
5600                 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5601                 if (bytes > to_free)
5602                         to_free = bytes - to_free;
5603                 else
5604                         to_free = 0;
5605         }
5606         spin_unlock(&BTRFS_I(inode)->lock);
5607         if (dropped)
5608                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5609
5610         if (to_free) {
5611                 btrfs_block_rsv_release(root, block_rsv, to_free);
5612                 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5613                                               btrfs_ino(inode), to_free, 0);
5614         }
5615         if (delalloc_lock)
5616                 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5617         return ret;
5618 }
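
/*
 * A hedged numeric walk-through of the out_fail csum logic above: say
 * num_bytes is 20, ->csum_bytes was 100 before we reserved (so the
 * 'csum_bytes' snapshot is 120), and racing freers dropped
 * ->csum_bytes to 90 while we slept:
 *   1) bytes = 120 - 90 = 30: refund those 30 bytes computed against
 *      the inflated base by resetting ->csum_bytes to 120.
 *   2) recompute the same 30-byte free against the uninflated base,
 *      ->csum_bytes = 120 - 20 = 100.
 *   3) settle ->csum_bytes at 90 - 20 = 70 and free the difference
 *      between (2) and (1), since the freers released too little while
 *      our failed reservation was inflating the count.
 */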
5619
5620 /**
5621  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5622  * @inode: the inode to release the reservation for
5623  * @num_bytes: the number of bytes we're releasing
5624  *
5625  * This will release the metadata reservation for an inode.  This can be called
5626  * once we complete IO for a given set of bytes to release their metadata
5627  * reservations.
5628  */
5629 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5630 {
5631         struct btrfs_root *root = BTRFS_I(inode)->root;
5632         u64 to_free = 0;
5633         unsigned dropped;
5634
5635         num_bytes = ALIGN(num_bytes, root->sectorsize);
5636         spin_lock(&BTRFS_I(inode)->lock);
5637         dropped = drop_outstanding_extent(inode, num_bytes);
5638
5639         if (num_bytes)
5640                 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5641         spin_unlock(&BTRFS_I(inode)->lock);
5642         if (dropped > 0)
5643                 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5644
5645         if (btrfs_test_is_dummy_root(root))
5646                 return;
5647
5648         trace_btrfs_space_reservation(root->fs_info, "delalloc",
5649                                       btrfs_ino(inode), to_free, 0);
5650
5651         btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5652                                 to_free);
5653 }
5654
5655 /**
5656  * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
5657  * @inode: inode we're writing to
5658  * @num_bytes: the number of bytes we want to allocate
5659  *
5660  * This will do the following things
5661  *
5662  * o reserve space in the data space info for num_bytes
5663  * o reserve space in the metadata space info based on number of outstanding
5664  *   extents and how much csums will be needed
5665  * o add to the inode's ->delalloc_bytes
5666  * o add it to the fs_info's delalloc inodes list.
5667  *
5668  * This will return 0 for success and -ENOSPC if there is no space left.
5669  */
5670 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
5671 {
5672         int ret;
5673
5674         ret = btrfs_check_data_free_space(inode, num_bytes, num_bytes);
5675         if (ret)
5676                 return ret;
5677
5678         ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
5679         if (ret) {
5680                 btrfs_free_reserved_data_space(inode, num_bytes);
5681                 return ret;
5682         }
5683
5684         return 0;
5685 }
5686
5687 /**
5688  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5689  * @inode: inode we're releasing space for
5690  * @num_bytes: the number of bytes we want to free up
5691  *
5692  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5693  * called in the case that we don't need the metadata AND data reservations
5694  * anymore, e.g. if there is an error or we insert an inline extent.
5695  *
5696  * This function will release the metadata space that was not used and will
5697  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5698  * list if there are no delalloc bytes left.
5699  */
5700 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
5701 {
5702         btrfs_delalloc_release_metadata(inode, num_bytes);
5703         btrfs_free_reserved_data_space(inode, num_bytes);
5704 }
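
/*
 * A minimal sketch, assuming a simplified write path, of how the
 * reserve/release pair above is used around buffered writes.  The
 * function name and the 'failed' flag are illustrative assumptions.
 */
static int example_buffered_write(struct inode *inode, u64 count, bool failed)
{
        int ret;

        ret = btrfs_delalloc_reserve_space(inode, count);
        if (ret)
                return ret;     /* typically -ENOSPC */

        /* ... copy data into the page cache here ... */

        if (failed) {
                /* error path: undo both the data and metadata reservations */
                btrfs_delalloc_release_space(inode, count);
                return -EIO;
        }
        return 0;
}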
5705
5706 static int update_block_group(struct btrfs_trans_handle *trans,
5707                               struct btrfs_root *root, u64 bytenr,
5708                               u64 num_bytes, int alloc)
5709 {
5710         struct btrfs_block_group_cache *cache = NULL;
5711         struct btrfs_fs_info *info = root->fs_info;
5712         u64 total = num_bytes;
5713         u64 old_val;
5714         u64 byte_in_group;
5715         int factor;
5716
5717         /* block accounting for super block */
5718         spin_lock(&info->delalloc_root_lock);
5719         old_val = btrfs_super_bytes_used(info->super_copy);
5720         if (alloc)
5721                 old_val += num_bytes;
5722         else
5723                 old_val -= num_bytes;
5724         btrfs_set_super_bytes_used(info->super_copy, old_val);
5725         spin_unlock(&info->delalloc_root_lock);
5726
5727         while (total) {
5728                 cache = btrfs_lookup_block_group(info, bytenr);
5729                 if (!cache)
5730                         return -ENOENT;
5731                 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5732                                     BTRFS_BLOCK_GROUP_RAID1 |
5733                                     BTRFS_BLOCK_GROUP_RAID10))
5734                         factor = 2;
5735                 else
5736                         factor = 1;
5737                 /*
5738                  * If this block group has free space cache written out, we
5739                  * need to make sure to load it if we are removing space.  This
5740                  * is because we need the unpinning stage to actually add the
5741                  * space back to the block group, otherwise we will leak space.
5742                  */
5743                 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5744                         cache_block_group(cache, 1);
5745
5746                 byte_in_group = bytenr - cache->key.objectid;
5747                 WARN_ON(byte_in_group > cache->key.offset);
5748
5749                 spin_lock(&cache->space_info->lock);
5750                 spin_lock(&cache->lock);
5751
5752                 if (btrfs_test_opt(root, SPACE_CACHE) &&
5753                     cache->disk_cache_state < BTRFS_DC_CLEAR)
5754                         cache->disk_cache_state = BTRFS_DC_CLEAR;
5755
5756                 old_val = btrfs_block_group_used(&cache->item);
5757                 num_bytes = min(total, cache->key.offset - byte_in_group);
5758                 if (alloc) {
5759                         old_val += num_bytes;
5760                         btrfs_set_block_group_used(&cache->item, old_val);
5761                         cache->reserved -= num_bytes;
5762                         cache->space_info->bytes_reserved -= num_bytes;
5763                         cache->space_info->bytes_used += num_bytes;
5764                         cache->space_info->disk_used += num_bytes * factor;
5765                         spin_unlock(&cache->lock);
5766                         spin_unlock(&cache->space_info->lock);
5767                 } else {
5768                         old_val -= num_bytes;
5769                         btrfs_set_block_group_used(&cache->item, old_val);
5770                         cache->pinned += num_bytes;
5771                         cache->space_info->bytes_pinned += num_bytes;
5772                         cache->space_info->bytes_used -= num_bytes;
5773                         cache->space_info->disk_used -= num_bytes * factor;
5774                         spin_unlock(&cache->lock);
5775                         spin_unlock(&cache->space_info->lock);
5776
5777                         set_extent_dirty(info->pinned_extents,
5778                                          bytenr, bytenr + num_bytes - 1,
5779                                          GFP_NOFS | __GFP_NOFAIL);
5780                         /*
5781                          * No longer have used bytes in this block group, queue
5782                          * it for deletion.
5783                          */
5784                         if (old_val == 0) {
5785                                 spin_lock(&info->unused_bgs_lock);
5786                                 if (list_empty(&cache->bg_list)) {
5787                                         btrfs_get_block_group(cache);
5788                                         list_add_tail(&cache->bg_list,
5789                                                       &info->unused_bgs);
5790                                 }
5791                                 spin_unlock(&info->unused_bgs_lock);
5792                         }
5793                 }
5794
5795                 spin_lock(&trans->transaction->dirty_bgs_lock);
5796                 if (list_empty(&cache->dirty_list)) {
5797                         list_add_tail(&cache->dirty_list,
5798                                       &trans->transaction->dirty_bgs);
5799                         trans->transaction->num_dirty_bgs++;
5800                         btrfs_get_block_group(cache);
5801                 }
5802                 spin_unlock(&trans->transaction->dirty_bgs_lock);
5803
5804                 btrfs_put_block_group(cache);
5805                 total -= num_bytes;
5806                 bytenr += num_bytes;
5807         }
5808         return 0;
5809 }
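
/*
 * Example of the 'factor' accounting above (hedged): allocating 1MiB
 * in a RAID1 or DUP block group bumps bytes_used by 1MiB of logical
 * space but disk_used by 2MiB, since every logical byte lives on two
 * copies.  single/RAID0 groups use factor 1, so the two counters move
 * in lockstep.
 */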
5810
5811 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5812 {
5813         struct btrfs_block_group_cache *cache;
5814         u64 bytenr;
5815
5816         spin_lock(&root->fs_info->block_group_cache_lock);
5817         bytenr = root->fs_info->first_logical_byte;
5818         spin_unlock(&root->fs_info->block_group_cache_lock);
5819
5820         if (bytenr < (u64)-1)
5821                 return bytenr;
5822
5823         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5824         if (!cache)
5825                 return 0;
5826
5827         bytenr = cache->key.objectid;
5828         btrfs_put_block_group(cache);
5829
5830         return bytenr;
5831 }
5832
5833 static int pin_down_extent(struct btrfs_root *root,
5834                            struct btrfs_block_group_cache *cache,
5835                            u64 bytenr, u64 num_bytes, int reserved)
5836 {
5837         spin_lock(&cache->space_info->lock);
5838         spin_lock(&cache->lock);
5839         cache->pinned += num_bytes;
5840         cache->space_info->bytes_pinned += num_bytes;
5841         if (reserved) {
5842                 cache->reserved -= num_bytes;
5843                 cache->space_info->bytes_reserved -= num_bytes;
5844         }
5845         spin_unlock(&cache->lock);
5846         spin_unlock(&cache->space_info->lock);
5847
5848         set_extent_dirty(root->fs_info->pinned_extents, bytenr,
5849                          bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
5850         if (reserved)
5851                 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5852         return 0;
5853 }
5854
5855 /*
5856  * this function must be called within a transaction
5857  */
5858 int btrfs_pin_extent(struct btrfs_root *root,
5859                      u64 bytenr, u64 num_bytes, int reserved)
5860 {
5861         struct btrfs_block_group_cache *cache;
5862
5863         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5864         BUG_ON(!cache); /* Logic error */
5865
5866         pin_down_extent(root, cache, bytenr, num_bytes, reserved);
5867
5868         btrfs_put_block_group(cache);
5869         return 0;
5870 }
5871
5872 /*
5873  * this function must be called within a transaction
5874  */
5875 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5876                                     u64 bytenr, u64 num_bytes)
5877 {
5878         struct btrfs_block_group_cache *cache;
5879         int ret;
5880
5881         cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5882         if (!cache)
5883                 return -EINVAL;
5884
5885         /*
5886          * pull in the free space cache (if any) so that our pin
5887          * removes the free space from the cache.  We have load_only set
5888          * to one because the slow code that reads in the free extents
5889          * checks the pinned extents.
5890          */
5891         cache_block_group(cache, 1);
5892
5893         pin_down_extent(root, cache, bytenr, num_bytes, 0);
5894
5895         /* remove us from the free space cache (if we're there at all) */
5896         ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5897         btrfs_put_block_group(cache);
5898         return ret;
5899 }
5900
5901 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
5902 {
5903         int ret;
5904         struct btrfs_block_group_cache *block_group;
5905         struct btrfs_caching_control *caching_ctl;
5906
5907         block_group = btrfs_lookup_block_group(root->fs_info, start);
5908         if (!block_group)
5909                 return -EINVAL;
5910
5911         cache_block_group(block_group, 0);
5912         caching_ctl = get_caching_control(block_group);
5913
5914         if (!caching_ctl) {
5915                 /* Logic error */
5916                 BUG_ON(!block_group_cache_done(block_group));
5917                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5918         } else {
5919                 mutex_lock(&caching_ctl->mutex);
5920
5921                 if (start >= caching_ctl->progress) {
5922                         ret = add_excluded_extent(root, start, num_bytes);
5923                 } else if (start + num_bytes <= caching_ctl->progress) {
5924                         ret = btrfs_remove_free_space(block_group,
5925                                                       start, num_bytes);
5926                 } else {
5927                         num_bytes = caching_ctl->progress - start;
5928                         ret = btrfs_remove_free_space(block_group,
5929                                                       start, num_bytes);
5930                         if (ret)
5931                                 goto out_lock;
5932
5933                         num_bytes = (start + num_bytes) -
5934                                 caching_ctl->progress;
5935                         start = caching_ctl->progress;
5936                         ret = add_excluded_extent(root, start, num_bytes);
5937                 }
5938 out_lock:
5939                 mutex_unlock(&caching_ctl->mutex);
5940                 put_caching_control(caching_ctl);
5941         }
5942         btrfs_put_block_group(block_group);
5943         return ret;
5944 }
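
/*
 * The three cases above, sketched against the caching scanner's
 * ->progress cursor P (illustrative diagram, not code):
 *
 *        ............P............
 *   (a)                [======]     start >= P: range not yet cached,
 *                                   just mark it excluded
 *   (b)   [======]                  end <= P: already cached, remove it
 *                                   from the free space cache directly
 *   (c)        [====|====]          straddles P: remove the cached half,
 *                                   exclude the remainder
 */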
5945
5946 int btrfs_exclude_logged_extents(struct btrfs_root *log,
5947                                  struct extent_buffer *eb)
5948 {
5949         struct btrfs_file_extent_item *item;
5950         struct btrfs_key key;
5951         int found_type;
5952         int i;
5953
5954         if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
5955                 return 0;
5956
5957         for (i = 0; i < btrfs_header_nritems(eb); i++) {
5958                 btrfs_item_key_to_cpu(eb, &key, i);
5959                 if (key.type != BTRFS_EXTENT_DATA_KEY)
5960                         continue;
5961                 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
5962                 found_type = btrfs_file_extent_type(eb, item);
5963                 if (found_type == BTRFS_FILE_EXTENT_INLINE)
5964                         continue;
5965                 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
5966                         continue;
5967                 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
5968                 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
5969                 __exclude_logged_extent(log, key.objectid, key.offset);
5970         }
5971
5972         return 0;
5973 }
5974
5975 /**
5976  * btrfs_update_reserved_bytes - update the block_group and space info counters
5977  * @cache:      The cache we are manipulating
5978  * @num_bytes:  The number of bytes in question
5979  * @reserve:    One of the reservation enums
5980  * @delalloc:   The blocks are allocated for the delalloc write
5981  *
5982  * This is called by the allocator when it reserves space, or by somebody who is
5983  * freeing space that was never actually used on disk.  For example if you
5984  * reserve some space for a new leaf in transaction A and before transaction A
5985  * commits you free that leaf, you call this with reserve set to 0 in order to
5986  * clear the reservation.
5987  *
5988  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
5989  * ENOSPC accounting.  For data we handle the reservation through clearing the
5990  * delalloc bits in the io_tree.  We have to do this since we could end up
5991  * allocating less disk space for the amount of data we have reserved in the
5992  * case of compression.
5993  *
5994  * If this is a reservation and the block group has become read only we cannot
5995  * make the reservation and return -EAGAIN, otherwise this function always
5996  * succeeds.
5997  */
5998 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
5999                                        u64 num_bytes, int reserve, int delalloc)
6000 {
6001         struct btrfs_space_info *space_info = cache->space_info;
6002         int ret = 0;
6003
6004         spin_lock(&space_info->lock);
6005         spin_lock(&cache->lock);
6006         if (reserve != RESERVE_FREE) {
6007                 if (cache->ro) {
6008                         ret = -EAGAIN;
6009                 } else {
6010                         cache->reserved += num_bytes;
6011                         space_info->bytes_reserved += num_bytes;
6012                         if (reserve == RESERVE_ALLOC) {
6013                                 trace_btrfs_space_reservation(cache->fs_info,
6014                                                 "space_info", space_info->flags,
6015                                                 num_bytes, 0);
6016                                 space_info->bytes_may_use -= num_bytes;
6017                         }
6018
6019                         if (delalloc)
6020                                 cache->delalloc_bytes += num_bytes;
6021                 }
6022         } else {
6023                 if (cache->ro)
6024                         space_info->bytes_readonly += num_bytes;
6025                 cache->reserved -= num_bytes;
6026                 space_info->bytes_reserved -= num_bytes;
6027
6028                 if (delalloc)
6029                         cache->delalloc_bytes -= num_bytes;
6030         }
6031         spin_unlock(&cache->lock);
6032         spin_unlock(&space_info->lock);
6033         return ret;
6034 }
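
/*
 * A hedged usage sketch of the reserve/free pairing documented above:
 * reserve with RESERVE_ALLOC, then hand the space back with
 * RESERVE_FREE if it turns out not to be needed.  The function name is
 * an illustrative assumption.
 */
static int example_reserve_then_free(struct btrfs_block_group_cache *cache,
                                     u64 len)
{
        int ret;

        ret = btrfs_update_reserved_bytes(cache, len, RESERVE_ALLOC, 0);
        if (ret)
                return ret;     /* -EAGAIN: the block group went read-only */

        /* ... decide the space is not needed after all ... */

        btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, 0);
        return 0;
}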
6035
6036 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6037                                 struct btrfs_root *root)
6038 {
6039         struct btrfs_fs_info *fs_info = root->fs_info;
6040         struct btrfs_caching_control *next;
6041         struct btrfs_caching_control *caching_ctl;
6042         struct btrfs_block_group_cache *cache;
6043
6044         down_write(&fs_info->commit_root_sem);
6045
6046         list_for_each_entry_safe(caching_ctl, next,
6047                                  &fs_info->caching_block_groups, list) {
6048                 cache = caching_ctl->block_group;
6049                 if (block_group_cache_done(cache)) {
6050                         cache->last_byte_to_unpin = (u64)-1;
6051                         list_del_init(&caching_ctl->list);
6052                         put_caching_control(caching_ctl);
6053                 } else {
6054                         cache->last_byte_to_unpin = caching_ctl->progress;
6055                 }
6056         }
6057
6058         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6059                 fs_info->pinned_extents = &fs_info->freed_extents[1];
6060         else
6061                 fs_info->pinned_extents = &fs_info->freed_extents[0];
6062
6063         up_write(&fs_info->commit_root_sem);
6064
6065         update_global_block_rsv(fs_info);
6066 }
6067
6068 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6069                               const bool return_free_space)
6070 {
6071         struct btrfs_fs_info *fs_info = root->fs_info;
6072         struct btrfs_block_group_cache *cache = NULL;
6073         struct btrfs_space_info *space_info;
6074         struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6075         u64 len;
6076         bool readonly;
6077
6078         while (start <= end) {
6079                 readonly = false;
6080                 if (!cache ||
6081                     start >= cache->key.objectid + cache->key.offset) {
6082                         if (cache)
6083                                 btrfs_put_block_group(cache);
6084                         cache = btrfs_lookup_block_group(fs_info, start);
6085                         BUG_ON(!cache); /* Logic error */
6086                 }
6087
6088                 len = cache->key.objectid + cache->key.offset - start;
6089                 len = min(len, end + 1 - start);
6090
6091                 if (start < cache->last_byte_to_unpin) {
6092                         len = min(len, cache->last_byte_to_unpin - start);
6093                         if (return_free_space)
6094                                 btrfs_add_free_space(cache, start, len);
6095                 }
6096
6097                 start += len;
6098                 space_info = cache->space_info;
6099
6100                 spin_lock(&space_info->lock);
6101                 spin_lock(&cache->lock);
6102                 cache->pinned -= len;
6103                 space_info->bytes_pinned -= len;
6104                 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6105                 if (cache->ro) {
6106                         space_info->bytes_readonly += len;
6107                         readonly = true;
6108                 }
6109                 spin_unlock(&cache->lock);
6110                 if (!readonly && global_rsv->space_info == space_info) {
6111                         spin_lock(&global_rsv->lock);
6112                         if (!global_rsv->full) {
6113                                 len = min(len, global_rsv->size -
6114                                           global_rsv->reserved);
6115                                 global_rsv->reserved += len;
6116                                 space_info->bytes_may_use += len;
6117                                 if (global_rsv->reserved >= global_rsv->size)
6118                                         global_rsv->full = 1;
6119                         }
6120                         spin_unlock(&global_rsv->lock);
6121                 }
6122                 spin_unlock(&space_info->lock);
6123         }
6124
6125         if (cache)
6126                 btrfs_put_block_group(cache);
6127         return 0;
6128 }
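
/*
 * Worked example (hedged numbers) of the global rsv refill above: with
 * global_rsv->size at 512MiB and 500MiB reserved, unpinning 100MiB of
 * metadata routes min(100MiB, 12MiB) = 12MiB back into the reserve,
 * marking it full and re-accounting those bytes as bytes_may_use; the
 * remaining 88MiB was already returned as free space in the block
 * group.
 */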
6129
6130 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6131                                struct btrfs_root *root)
6132 {
6133         struct btrfs_fs_info *fs_info = root->fs_info;
6134         struct btrfs_block_group_cache *block_group, *tmp;
6135         struct list_head *deleted_bgs;
6136         struct extent_io_tree *unpin;
6137         u64 start;
6138         u64 end;
6139         int ret;
6140
6141         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6142                 unpin = &fs_info->freed_extents[1];
6143         else
6144                 unpin = &fs_info->freed_extents[0];
6145
6146         while (!trans->aborted) {
6147                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6148                 ret = find_first_extent_bit(unpin, 0, &start, &end,
6149                                             EXTENT_DIRTY, NULL);
6150                 if (ret) {
6151                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6152                         break;
6153                 }
6154
6155                 if (btrfs_test_opt(root, DISCARD))
6156                         ret = btrfs_discard_extent(root, start,
6157                                                    end + 1 - start, NULL);
6158
6159                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
6160                 unpin_extent_range(root, start, end, true);
6161                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6162                 cond_resched();
6163         }
6164
6165         /*
6166          * Transaction is finished.  We don't need the lock anymore.  We
6167          * do need to clean up the block groups in case of a transaction
6168          * abort.
6169          */
6170         deleted_bgs = &trans->transaction->deleted_bgs;
6171         list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6172                 u64 trimmed = 0;
6173
6174                 ret = -EROFS;
6175                 if (!trans->aborted)
6176                         ret = btrfs_discard_extent(root,
6177                                                    block_group->key.objectid,
6178                                                    block_group->key.offset,
6179                                                    &trimmed);
6180
6181                 list_del_init(&block_group->bg_list);
6182                 btrfs_put_block_group_trimming(block_group);
6183                 btrfs_put_block_group(block_group);
6184
6185                 if (ret) {
6186                         const char *errstr = btrfs_decode_error(ret);
6187                         btrfs_warn(fs_info,
6188                                    "Discard failed while removing blockgroup: errno=%d %s\n",
6189                                    ret, errstr);
6190                 }
6191         }
6192
6193         return 0;
6194 }
6195
6196 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6197                              u64 owner, u64 root_objectid)
6198 {
6199         struct btrfs_space_info *space_info;
6200         u64 flags;
6201
6202         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6203                 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6204                         flags = BTRFS_BLOCK_GROUP_SYSTEM;
6205                 else
6206                         flags = BTRFS_BLOCK_GROUP_METADATA;
6207         } else {
6208                 flags = BTRFS_BLOCK_GROUP_DATA;
6209         }
6210
6211         space_info = __find_space_info(fs_info, flags);
6212         BUG_ON(!space_info); /* Logic bug */
6213         percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6214 }
6215
6216
6217 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6218                                 struct btrfs_root *root,
6219                                 struct btrfs_delayed_ref_node *node, u64 parent,
6220                                 u64 root_objectid, u64 owner_objectid,
6221                                 u64 owner_offset, int refs_to_drop,
6222                                 struct btrfs_delayed_extent_op *extent_op)
6223 {
6224         struct btrfs_key key;
6225         struct btrfs_path *path;
6226         struct btrfs_fs_info *info = root->fs_info;
6227         struct btrfs_root *extent_root = info->extent_root;
6228         struct extent_buffer *leaf;
6229         struct btrfs_extent_item *ei;
6230         struct btrfs_extent_inline_ref *iref;
6231         int ret;
6232         int is_data;
6233         int extent_slot = 0;
6234         int found_extent = 0;
6235         int num_to_del = 1;
6236         int no_quota = node->no_quota;
6237         u32 item_size;
6238         u64 refs;
6239         u64 bytenr = node->bytenr;
6240         u64 num_bytes = node->num_bytes;
6241         int last_ref = 0;
6242         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6243                                                  SKINNY_METADATA);
6244
6245         if (!info->quota_enabled || !is_fstree(root_objectid))
6246                 no_quota = 1;
6247
6248         path = btrfs_alloc_path();
6249         if (!path)
6250                 return -ENOMEM;
6251
6252         path->reada = 1;
6253         path->leave_spinning = 1;
6254
6255         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6256         BUG_ON(!is_data && refs_to_drop != 1);
6257
6258         if (is_data)
6259                 skinny_metadata = 0;
6260
6261         ret = lookup_extent_backref(trans, extent_root, path, &iref,
6262                                     bytenr, num_bytes, parent,
6263                                     root_objectid, owner_objectid,
6264                                     owner_offset);
6265         if (ret == 0) {
6266                 extent_slot = path->slots[0];
6267                 while (extent_slot >= 0) {
6268                         btrfs_item_key_to_cpu(path->nodes[0], &key,
6269                                               extent_slot);
6270                         if (key.objectid != bytenr)
6271                                 break;
6272                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6273                             key.offset == num_bytes) {
6274                                 found_extent = 1;
6275                                 break;
6276                         }
6277                         if (key.type == BTRFS_METADATA_ITEM_KEY &&
6278                             key.offset == owner_objectid) {
6279                                 found_extent = 1;
6280                                 break;
6281                         }
6282                         if (path->slots[0] - extent_slot > 5)
6283                                 break;
6284                         extent_slot--;
6285                 }
6286 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6287                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6288                 if (found_extent && item_size < sizeof(*ei))
6289                         found_extent = 0;
6290 #endif
6291                 if (!found_extent) {
6292                         BUG_ON(iref);
6293                         ret = remove_extent_backref(trans, extent_root, path,
6294                                                     NULL, refs_to_drop,
6295                                                     is_data, &last_ref);
6296                         if (ret) {
6297                                 btrfs_abort_transaction(trans, extent_root, ret);
6298                                 goto out;
6299                         }
6300                         btrfs_release_path(path);
6301                         path->leave_spinning = 1;
6302
6303                         key.objectid = bytenr;
6304                         key.type = BTRFS_EXTENT_ITEM_KEY;
6305                         key.offset = num_bytes;
6306
6307                         if (!is_data && skinny_metadata) {
6308                                 key.type = BTRFS_METADATA_ITEM_KEY;
6309                                 key.offset = owner_objectid;
6310                         }
6311
6312                         ret = btrfs_search_slot(trans, extent_root,
6313                                                 &key, path, -1, 1);
6314                         if (ret > 0 && skinny_metadata && path->slots[0]) {
6315                                 /*
6316                                  * Couldn't find our skinny metadata item,
6317                                  * see if we have ye olde extent item.
6318                                  */
6319                                 path->slots[0]--;
6320                                 btrfs_item_key_to_cpu(path->nodes[0], &key,
6321                                                       path->slots[0]);
6322                                 if (key.objectid == bytenr &&
6323                                     key.type == BTRFS_EXTENT_ITEM_KEY &&
6324                                     key.offset == num_bytes)
6325                                         ret = 0;
6326                         }
6327
6328                         if (ret > 0 && skinny_metadata) {
6329                                 skinny_metadata = false;
6330                                 key.objectid = bytenr;
6331                                 key.type = BTRFS_EXTENT_ITEM_KEY;
6332                                 key.offset = num_bytes;
6333                                 btrfs_release_path(path);
6334                                 ret = btrfs_search_slot(trans, extent_root,
6335                                                         &key, path, -1, 1);
6336                         }
6337
6338                         if (ret) {
6339                                 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6340                                         ret, bytenr);
6341                                 if (ret > 0)
6342                                         btrfs_print_leaf(extent_root,
6343                                                          path->nodes[0]);
6344                         }
6345                         if (ret < 0) {
6346                                 btrfs_abort_transaction(trans, extent_root, ret);
6347                                 goto out;
6348                         }
6349                         extent_slot = path->slots[0];
6350                 }
6351         } else if (WARN_ON(ret == -ENOENT)) {
6352                 btrfs_print_leaf(extent_root, path->nodes[0]);
6353                 btrfs_err(info,
6354                         "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
6355                         bytenr, parent, root_objectid, owner_objectid,
6356                         owner_offset);
6357                 btrfs_abort_transaction(trans, extent_root, ret);
6358                 goto out;
6359         } else {
6360                 btrfs_abort_transaction(trans, extent_root, ret);
6361                 goto out;
6362         }
6363
6364         leaf = path->nodes[0];
6365         item_size = btrfs_item_size_nr(leaf, extent_slot);
6366 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6367         if (item_size < sizeof(*ei)) {
6368                 BUG_ON(found_extent || extent_slot != path->slots[0]);
6369                 ret = convert_extent_item_v0(trans, extent_root, path,
6370                                              owner_objectid, 0);
6371                 if (ret < 0) {
6372                         btrfs_abort_transaction(trans, extent_root, ret);
6373                         goto out;
6374                 }
6375
6376                 btrfs_release_path(path);
6377                 path->leave_spinning = 1;
6378
6379                 key.objectid = bytenr;
6380                 key.type = BTRFS_EXTENT_ITEM_KEY;
6381                 key.offset = num_bytes;
6382
6383                 ret = btrfs_search_slot(trans, extent_root, &key, path,
6384                                         -1, 1);
6385                 if (ret) {
6386                         btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6387                                 ret, bytenr);
6388                         btrfs_print_leaf(extent_root, path->nodes[0]);
6389                 }
6390                 if (ret < 0) {
6391                         btrfs_abort_transaction(trans, extent_root, ret);
6392                         goto out;
6393                 }
6394
6395                 extent_slot = path->slots[0];
6396                 leaf = path->nodes[0];
6397                 item_size = btrfs_item_size_nr(leaf, extent_slot);
6398         }
6399 #endif
6400         BUG_ON(item_size < sizeof(*ei));
6401         ei = btrfs_item_ptr(leaf, extent_slot,
6402                             struct btrfs_extent_item);
6403         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6404             key.type == BTRFS_EXTENT_ITEM_KEY) {
6405                 struct btrfs_tree_block_info *bi;
6406                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6407                 bi = (struct btrfs_tree_block_info *)(ei + 1);
6408                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6409         }
6410
6411         refs = btrfs_extent_refs(leaf, ei);
6412         if (refs < refs_to_drop) {
6413                 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6414                           "for bytenr %Lu", refs_to_drop, refs, bytenr);
6415                 ret = -EINVAL;
6416                 btrfs_abort_transaction(trans, extent_root, ret);
6417                 goto out;
6418         }
6419         refs -= refs_to_drop;
6420
6421         if (refs > 0) {
6422                 if (extent_op)
6423                         __run_delayed_extent_op(extent_op, leaf, ei);
6424                 /*
6425                  * In the case of inline back ref, reference count will
6426                  * be updated by remove_extent_backref
6427                  */
6428                 if (iref) {
6429                         BUG_ON(!found_extent);
6430                 } else {
6431                         btrfs_set_extent_refs(leaf, ei, refs);
6432                         btrfs_mark_buffer_dirty(leaf);
6433                 }
6434                 if (found_extent) {
6435                         ret = remove_extent_backref(trans, extent_root, path,
6436                                                     iref, refs_to_drop,
6437                                                     is_data, &last_ref);
6438                         if (ret) {
6439                                 btrfs_abort_transaction(trans, extent_root, ret);
6440                                 goto out;
6441                         }
6442                 }
6443                 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6444                                  root_objectid);
6445         } else {
6446                 if (found_extent) {
6447                         BUG_ON(is_data && refs_to_drop !=
6448                                extent_data_ref_count(path, iref));
6449                         if (iref) {
6450                                 BUG_ON(path->slots[0] != extent_slot);
6451                         } else {
6452                                 BUG_ON(path->slots[0] != extent_slot + 1);
6453                                 path->slots[0] = extent_slot;
6454                                 num_to_del = 2;
6455                         }
6456                 }
6457
6458                 last_ref = 1;
6459                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6460                                       num_to_del);
6461                 if (ret) {
6462                         btrfs_abort_transaction(trans, extent_root, ret);
6463                         goto out;
6464                 }
6465                 btrfs_release_path(path);
6466
6467                 if (is_data) {
6468                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6469                         if (ret) {
6470                                 btrfs_abort_transaction(trans, extent_root, ret);
6471                                 goto out;
6472                         }
6473                 }
6474
6475                 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6476                 if (ret) {
6477                         btrfs_abort_transaction(trans, extent_root, ret);
6478                         goto out;
6479                 }
6480         }
6481         btrfs_release_path(path);
6482
6483 out:
6484         btrfs_free_path(path);
6485         return ret;
6486 }
6487
6488 /*
6489  * when we free a block, it is possible (and likely) that we free the last
6490  * delayed ref for that extent as well.  This searches the delayed ref tree for
6491  * a given extent, and if there are no other delayed refs to be processed, it
6492  * removes it from the tree.
6493  */
6494 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6495                                       struct btrfs_root *root, u64 bytenr)
6496 {
6497         struct btrfs_delayed_ref_head *head;
6498         struct btrfs_delayed_ref_root *delayed_refs;
6499         int ret = 0;
6500
6501         delayed_refs = &trans->transaction->delayed_refs;
6502         spin_lock(&delayed_refs->lock);
6503         head = btrfs_find_delayed_ref_head(trans, bytenr);
6504         if (!head)
6505                 goto out_delayed_unlock;
6506
6507         spin_lock(&head->lock);
6508         if (!list_empty(&head->ref_list))
6509                 goto out;
6510
6511         if (head->extent_op) {
6512                 if (!head->must_insert_reserved)
6513                         goto out;
6514                 btrfs_free_delayed_extent_op(head->extent_op);
6515                 head->extent_op = NULL;
6516         }
6517
6518         /*
6519          * waiting for the lock here would deadlock.  If someone else has it
6520          * locked they are already in the process of dropping it anyway
6521          */
6522         if (!mutex_trylock(&head->mutex))
6523                 goto out;
6524
6525         /*
6526          * at this point we have a head with no other entries.  Go
6527          * ahead and process it.
6528          */
6529         head->node.in_tree = 0;
6530         rb_erase(&head->href_node, &delayed_refs->href_root);
6531
6532         atomic_dec(&delayed_refs->num_entries);
6533
6534         /*
6535          * we don't take a ref on the node because we're removing it from the
6536          * tree, so we just steal the ref the tree was holding.
6537          */
6538         delayed_refs->num_heads--;
6539         if (head->processing == 0)
6540                 delayed_refs->num_heads_ready--;
6541         head->processing = 0;
6542         spin_unlock(&head->lock);
6543         spin_unlock(&delayed_refs->lock);
6544
6545         BUG_ON(head->extent_op);
6546         if (head->must_insert_reserved)
6547                 ret = 1;
6548
6549         mutex_unlock(&head->mutex);
6550         btrfs_put_delayed_ref(&head->node);
6551         return ret;
6552 out:
6553         spin_unlock(&head->lock);
6554
6555 out_delayed_unlock:
6556         spin_unlock(&delayed_refs->lock);
6557         return 0;
6558 }
6559
6560 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6561                            struct btrfs_root *root,
6562                            struct extent_buffer *buf,
6563                            u64 parent, int last_ref)
6564 {
6565         int pin = 1;
6566         int ret;
6567
6568         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6569                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6570                                         buf->start, buf->len,
6571                                         parent, root->root_key.objectid,
6572                                         btrfs_header_level(buf),
6573                                         BTRFS_DROP_DELAYED_REF, NULL, 0);
6574                 BUG_ON(ret); /* -ENOMEM */
6575         }
6576
6577         if (!last_ref)
6578                 return;
6579
6580         if (btrfs_header_generation(buf) == trans->transid) {
6581                 struct btrfs_block_group_cache *cache;
6582
6583                 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6584                         ret = check_ref_cleanup(trans, root, buf->start);
6585                         if (!ret)
6586                                 goto out;
6587                 }
6588
6589                 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6590
6591                 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6592                         pin_down_extent(root, cache, buf->start, buf->len, 1);
6593                         btrfs_put_block_group(cache);
6594                         goto out;
6595                 }
6596
6597                 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6598
6599                 btrfs_add_free_space(cache, buf->start, buf->len);
6600                 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6601                 btrfs_put_block_group(cache);
6602                 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6603                 pin = 0;
6604         }
6605 out:
6606         if (pin)
6607                 add_pinned_bytes(root->fs_info, buf->len,
6608                                  btrfs_header_level(buf),
6609                                  root->root_key.objectid);
6610
6611         /*
6612          * Deleting the buffer, clear the corrupt flag since it doesn't matter
6613          * anymore.
6614          */
6615         clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6616 }
6617
6618 /* Can return -ENOMEM */
6619 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6620                       u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6621                       u64 owner, u64 offset, int no_quota)
6622 {
6623         int ret;
6624         struct btrfs_fs_info *fs_info = root->fs_info;
6625
6626         if (btrfs_test_is_dummy_root(root))
6627                 return 0;
6628
6629         add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6630
6631         /*
6632          * tree log blocks never actually go into the extent allocation
6633          * tree, just update pinning info and exit early.
6634          */
6635         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6636                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6637                 /* unlocks the pinned mutex */
6638                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6639                 ret = 0;
6640         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6641                 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6642                                         num_bytes,
6643                                         parent, root_objectid, (int)owner,
6644                                         BTRFS_DROP_DELAYED_REF, NULL, no_quota);
6645         } else {
6646                 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6647                                                 num_bytes,
6648                                                 parent, root_objectid, owner,
6649                                                 offset, BTRFS_DROP_DELAYED_REF,
6650                                                 NULL, no_quota);
6651         }
6652         return ret;
6653 }
6654
6655 /*
6656  * when we wait for progress in the block group caching, it's because
6657  * our allocation attempt failed at least once.  So, we must sleep
6658  * and let some progress happen before we try again.
6659  *
6660  * This function will sleep at least once waiting for new free space to
6661  * show up, and then it will check the block group free space numbers
6662  * for our min num_bytes.  Another option is to have it go ahead
6663  * and look in the rbtree for a free extent of a given size, but this
6664  * is a good start.
6665  *
6666  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6667  * any of the information in this block group.
6668  */
6669 static noinline void
6670 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6671                                 u64 num_bytes)
6672 {
6673         struct btrfs_caching_control *caching_ctl;
6674
6675         caching_ctl = get_caching_control(cache);
6676         if (!caching_ctl)
6677                 return;
6678
6679         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6680                    (cache->free_space_ctl->free_space >= num_bytes));
6681
6682         put_caching_control(caching_ctl);
6683 }
6684
6685 static noinline int
6686 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6687 {
6688         struct btrfs_caching_control *caching_ctl;
6689         int ret = 0;
6690
6691         caching_ctl = get_caching_control(cache);
6692         if (!caching_ctl)
6693                 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6694
6695         wait_event(caching_ctl->wait, block_group_cache_done(cache));
6696         if (cache->cached == BTRFS_CACHE_ERROR)
6697                 ret = -EIO;
6698         put_caching_control(caching_ctl);
6699         return ret;
6700 }
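/*
 * Illustrative sketch (editor's addition; not in the original source):
 * how a caller might combine the two wait helpers above.
 * example_wait_for_bg() is hypothetical:
 */
#if 0
static int example_wait_for_bg(struct btrfs_block_group_cache *cache,
                               u64 num_bytes)
{
        /* sleep until caching finishes or enough free space shows up */
        wait_block_group_cache_progress(cache, num_bytes);

        /* a full wait reports -EIO if the caching worker failed */
        return wait_block_group_cache_done(cache);
}
#endif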
6701
6702 int __get_raid_index(u64 flags)
6703 {
6704         if (flags & BTRFS_BLOCK_GROUP_RAID10)
6705                 return BTRFS_RAID_RAID10;
6706         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6707                 return BTRFS_RAID_RAID1;
6708         else if (flags & BTRFS_BLOCK_GROUP_DUP)
6709                 return BTRFS_RAID_DUP;
6710         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6711                 return BTRFS_RAID_RAID0;
6712         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6713                 return BTRFS_RAID_RAID5;
6714         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6715                 return BTRFS_RAID_RAID6;
6716
6717         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6718 }
6719
6720 int get_block_group_index(struct btrfs_block_group_cache *cache)
6721 {
6722         return __get_raid_index(cache->flags);
6723 }
6724
6725 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6726         [BTRFS_RAID_RAID10]     = "raid10",
6727         [BTRFS_RAID_RAID1]      = "raid1",
6728         [BTRFS_RAID_DUP]        = "dup",
6729         [BTRFS_RAID_RAID0]      = "raid0",
6730         [BTRFS_RAID_SINGLE]     = "single",
6731         [BTRFS_RAID_RAID5]      = "raid5",
6732         [BTRFS_RAID_RAID6]      = "raid6",
6733 };
6734
6735 static const char *get_raid_name(enum btrfs_raid_types type)
6736 {
6737         if (type >= BTRFS_NR_RAID_TYPES)
6738                 return NULL;
6739
6740         return btrfs_raid_type_names[type];
6741 }
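/*
 * Illustrative sketch (editor's addition): __get_raid_index() maps profile
 * flags to an index into btrfs_raid_type_names[], so the two helpers
 * compose.  example_raid_name_for_flags() is hypothetical:
 */
#if 0
static const char *example_raid_name_for_flags(u64 flags)
{
        /* e.g. BTRFS_BLOCK_GROUP_RAID1 -> BTRFS_RAID_RAID1 -> "raid1" */
        return get_raid_name(__get_raid_index(flags));
}
#endif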
6742
6743 enum btrfs_loop_type {
6744         LOOP_CACHING_NOWAIT = 0,
6745         LOOP_CACHING_WAIT = 1,
6746         LOOP_ALLOC_CHUNK = 2,
6747         LOOP_NO_EMPTY_SIZE = 3,
6748 };
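/*
 * Illustrative sketch (editor's addition): find_free_extent() below walks
 * these stages in order, getting more aggressive each time.  Roughly,
 * with try_all_block_groups() as a hypothetical stand-in for one pass
 * over every raid index:
 */
#if 0
static int example_escalate(void)
{
        int loop, found = 0;

        for (loop = LOOP_CACHING_NOWAIT;
             loop <= LOOP_NO_EMPTY_SIZE && !found; loop++) {
                /*
                 * LOOP_ALLOC_CHUNK additionally forces a chunk allocation;
                 * LOOP_NO_EMPTY_SIZE retries with empty_size and
                 * empty_cluster forced to 0.
                 */
                found = try_all_block_groups(loop);
        }
        return found ? 0 : -ENOSPC;
}
#endif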
6749
6750 static inline void
6751 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6752                        int delalloc)
6753 {
6754         if (delalloc)
6755                 down_read(&cache->data_rwsem);
6756 }
6757
6758 static inline void
6759 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6760                        int delalloc)
6761 {
6762         btrfs_get_block_group(cache);
6763         if (delalloc)
6764                 down_read(&cache->data_rwsem);
6765 }
6766
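/*
 * Editor's note (added): btrfs_lock_cluster() below returns the cluster's
 * current block group with a reference held (and, for delalloc, its
 * data_rwsem read-locked).  We may not sleep on data_rwsem while holding
 * the refill_lock spinlock, so on contention it drops the spinlock, takes
 * the rwsem, then re-takes the spinlock and revalidates that the cluster
 * still points at the same block group.
 */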
6767 static struct btrfs_block_group_cache *
6768 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6769                    struct btrfs_free_cluster *cluster,
6770                    int delalloc)
6771 {
6772         struct btrfs_block_group_cache *used_bg;
6773         bool locked = false;
6774 again:
6775         spin_lock(&cluster->refill_lock);
6776         if (locked) {
6777                 if (used_bg == cluster->block_group)
6778                         return used_bg;
6779
6780                 up_read(&used_bg->data_rwsem);
6781                 btrfs_put_block_group(used_bg);
6782         }
6783
6784         used_bg = cluster->block_group;
6785         if (!used_bg)
6786                 return NULL;
6787
6788         if (used_bg == block_group)
6789                 return used_bg;
6790
6791         btrfs_get_block_group(used_bg);
6792
6793         if (!delalloc)
6794                 return used_bg;
6795
6796         if (down_read_trylock(&used_bg->data_rwsem))
6797                 return used_bg;
6798
6799         spin_unlock(&cluster->refill_lock);
6800         down_read(&used_bg->data_rwsem);
6801         locked = true;
6802         goto again;
6803 }
6804
6805 static inline void
6806 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
6807                          int delalloc)
6808 {
6809         if (delalloc)
6810                 up_read(&cache->data_rwsem);
6811         btrfs_put_block_group(cache);
6812 }
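/*
 * Illustrative sketch (editor's addition): the helpers above pair up
 * around any use of a block group inside find_free_extent()'s search
 * loop:
 */
#if 0
btrfs_grab_block_group(block_group, delalloc);    /* ref, maybe rwsem */
/* ... attempt to carve num_bytes out of block_group ... */
btrfs_release_block_group(block_group, delalloc); /* rwsem, then ref */
#endif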
6813
6814 /*
6815  * walks the btree of allocated extents and finds a hole of a given size.
6816  * The key ins is changed to record the hole:
6817  * ins->objectid == start position
6818  * ins->type == BTRFS_EXTENT_ITEM_KEY
6819  * ins->offset == the size of the hole.
6820  * Any available blocks before search_start are skipped.
6821  *
6822  * If there is no suitable free space, we record the size of the largest
6823  * free extent seen, so the caller can retry with a smaller request.
6824  */
6825 static noinline int find_free_extent(struct btrfs_root *orig_root,
6826                                      u64 num_bytes, u64 empty_size,
6827                                      u64 hint_byte, struct btrfs_key *ins,
6828                                      u64 flags, int delalloc)
6829 {
6830         int ret = 0;
6831         struct btrfs_root *root = orig_root->fs_info->extent_root;
6832         struct btrfs_free_cluster *last_ptr = NULL;
6833         struct btrfs_block_group_cache *block_group = NULL;
6834         u64 search_start = 0;
6835         u64 max_extent_size = 0;
6836         int empty_cluster = 2 * 1024 * 1024;
6837         struct btrfs_space_info *space_info;
6838         int loop = 0;
6839         int index = __get_raid_index(flags);
6840         int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6841                 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
6842         bool failed_cluster_refill = false;
6843         bool failed_alloc = false;
6844         bool use_cluster = true;
6845         bool have_caching_bg = false;
6846
6847         WARN_ON(num_bytes < root->sectorsize);
6848         ins->type = BTRFS_EXTENT_ITEM_KEY;
6849         ins->objectid = 0;
6850         ins->offset = 0;
6851
6852         trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
6853
6854         space_info = __find_space_info(root->fs_info, flags);
6855         if (!space_info) {
6856                 btrfs_err(root->fs_info, "No space info for %llu", flags);
6857                 return -ENOSPC;
6858         }
6859
6860         /*
6861          * If the space info is for both data and metadata it means we have a
6862          * small filesystem and we can't use the clustering stuff.
6863          */
6864         if (btrfs_mixed_space_info(space_info))
6865                 use_cluster = false;
6866
6867         if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6868                 last_ptr = &root->fs_info->meta_alloc_cluster;
6869                 if (!btrfs_test_opt(root, SSD))
6870                         empty_cluster = 64 * 1024;
6871         }
6872
6873         if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6874             btrfs_test_opt(root, SSD)) {
6875                 last_ptr = &root->fs_info->data_alloc_cluster;
6876         }
6877
6878         if (last_ptr) {
6879                 spin_lock(&last_ptr->lock);
6880                 if (last_ptr->block_group)
6881                         hint_byte = last_ptr->window_start;
6882                 spin_unlock(&last_ptr->lock);
6883         }
6884
6885         search_start = max(search_start, first_logical_byte(root, 0));
6886         search_start = max(search_start, hint_byte);
6887
6888         if (!last_ptr)
6889                 empty_cluster = 0;
6890
6891         if (search_start == hint_byte) {
6892                 block_group = btrfs_lookup_block_group(root->fs_info,
6893                                                        search_start);
6894                 /*
6895                  * we don't want to use the block group if it doesn't match our
6896                  * allocation bits, or if it's not cached.
6897                  *
6898                  * However if we are re-searching with an ideal block group
6899                  * picked out then we don't care that the block group is cached.
6900                  */
6901                 if (block_group && block_group_bits(block_group, flags) &&
6902                     block_group->cached != BTRFS_CACHE_NO) {
6903                         down_read(&space_info->groups_sem);
6904                         if (list_empty(&block_group->list) ||
6905                             block_group->ro) {
6906                                 /*
6907                                  * someone is removing this block group,
6908                                  * we can't jump into the have_block_group
6909                                  * target because our list pointers are not
6910                                  * valid
6911                                  */
6912                                 btrfs_put_block_group(block_group);
6913                                 up_read(&space_info->groups_sem);
6914                         } else {
6915                                 index = get_block_group_index(block_group);
6916                                 btrfs_lock_block_group(block_group, delalloc);
6917                                 goto have_block_group;
6918                         }
6919                 } else if (block_group) {
6920                         btrfs_put_block_group(block_group);
6921                 }
6922         }
6923 search:
6924         have_caching_bg = false;
6925         down_read(&space_info->groups_sem);
6926         list_for_each_entry(block_group, &space_info->block_groups[index],
6927                             list) {
6928                 u64 offset;
6929                 int cached;
6930
6931                 btrfs_grab_block_group(block_group, delalloc);
6932                 search_start = block_group->key.objectid;
6933
6934                 /*
6935                  * this can happen if we end up cycling through all the
6936                  * raid types, but we want to make sure we only allocate
6937                  * for the proper type.
6938                  */
6939                 if (!block_group_bits(block_group, flags)) {
6940                         u64 extra = BTRFS_BLOCK_GROUP_DUP |
6941                                     BTRFS_BLOCK_GROUP_RAID1 |
6942                                     BTRFS_BLOCK_GROUP_RAID5 |
6943                                     BTRFS_BLOCK_GROUP_RAID6 |
6944                                     BTRFS_BLOCK_GROUP_RAID10;
6945
6946                         /*
6947                          * if they asked for extra copies and this block group
6948                          * doesn't provide them, bail.  This does allow us to
6949                          * fill raid0 from raid1.
6950                          */
6951                         if ((flags & extra) && !(block_group->flags & extra))
6952                                 goto loop;
6953                 }
6954
6955 have_block_group:
6956                 cached = block_group_cache_done(block_group);
6957                 if (unlikely(!cached)) {
6958                         ret = cache_block_group(block_group, 0);
6959                         BUG_ON(ret < 0);
6960                         ret = 0;
6961                 }
6962
6963                 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
6964                         goto loop;
6965                 if (unlikely(block_group->ro))
6966                         goto loop;
6967
6968                 /*
6969                  * OK, we want to try to use the cluster allocator, so
6970                  * let's look there
6971                  */
6972                 if (last_ptr) {
6973                         struct btrfs_block_group_cache *used_block_group;
6974                         unsigned long aligned_cluster;
6975                         /*
6976                          * the refill lock keeps out other
6977                          * people trying to start a new cluster
6978                          */
6979                         used_block_group = btrfs_lock_cluster(block_group,
6980                                                               last_ptr,
6981                                                               delalloc);
6982                         if (!used_block_group)
6983                                 goto refill_cluster;
6984
6985                         if (used_block_group != block_group &&
6986                             (used_block_group->ro ||
6987                              !block_group_bits(used_block_group, flags)))
6988                                 goto release_cluster;
6989
6990                         offset = btrfs_alloc_from_cluster(used_block_group,
6991                                                 last_ptr,
6992                                                 num_bytes,
6993                                                 used_block_group->key.objectid,
6994                                                 &max_extent_size);
6995                         if (offset) {
6996                                 /* we have a block, we're done */
6997                                 spin_unlock(&last_ptr->refill_lock);
6998                                 trace_btrfs_reserve_extent_cluster(root,
6999                                                 used_block_group,
7000                                                 search_start, num_bytes);
7001                                 if (used_block_group != block_group) {
7002                                         btrfs_release_block_group(block_group,
7003                                                                   delalloc);
7004                                         block_group = used_block_group;
7005                                 }
7006                                 goto checks;
7007                         }
7008
7009                         WARN_ON(last_ptr->block_group != used_block_group);
7010 release_cluster:
7011                         /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7012                          * set up a new cluster, so let's just skip it
7013                          * and let the allocator find whatever block
7014                          * it can find.  If we reach this point, we
7015                          * will have tried the cluster allocator
7016                          * plenty of times and not have found
7017                          * anything, so we are likely way too
7018                          * fragmented for the clustering stuff to find
7019                          * anything.
7020                          *
7021                          * However, if the cluster is taken from the
7022                          * current block group, release the cluster
7023                          * first, so that we stand a better chance of
7024                          * succeeding in the unclustered
7025                          * allocation.  */
7026                         if (loop >= LOOP_NO_EMPTY_SIZE &&
7027                             used_block_group != block_group) {
7028                                 spin_unlock(&last_ptr->refill_lock);
7029                                 btrfs_release_block_group(used_block_group,
7030                                                           delalloc);
7031                                 goto unclustered_alloc;
7032                         }
7033
7034                         /*
7035                          * this cluster didn't work out, free it and
7036                          * start over
7037                          */
7038                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7039
7040                         if (used_block_group != block_group)
7041                                 btrfs_release_block_group(used_block_group,
7042                                                           delalloc);
7043 refill_cluster:
7044                         if (loop >= LOOP_NO_EMPTY_SIZE) {
7045                                 spin_unlock(&last_ptr->refill_lock);
7046                                 goto unclustered_alloc;
7047                         }
7048
7049                         aligned_cluster = max_t(unsigned long,
7050                                                 empty_cluster + empty_size,
7051                                               block_group->full_stripe_len);
7052
7053                         /* allocate a cluster in this block group */
7054                         ret = btrfs_find_space_cluster(root, block_group,
7055                                                        last_ptr, search_start,
7056                                                        num_bytes,
7057                                                        aligned_cluster);
7058                         if (ret == 0) {
7059                                 /*
7060                                  * now pull our allocation out of this
7061                                  * cluster
7062                                  */
7063                                 offset = btrfs_alloc_from_cluster(block_group,
7064                                                         last_ptr,
7065                                                         num_bytes,
7066                                                         search_start,
7067                                                         &max_extent_size);
7068                                 if (offset) {
7069                                         /* we found one, proceed */
7070                                         spin_unlock(&last_ptr->refill_lock);
7071                                         trace_btrfs_reserve_extent_cluster(root,
7072                                                 block_group, search_start,
7073                                                 num_bytes);
7074                                         goto checks;
7075                                 }
7076                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
7077                                    && !failed_cluster_refill) {
7078                                 spin_unlock(&last_ptr->refill_lock);
7079
7080                                 failed_cluster_refill = true;
7081                                 wait_block_group_cache_progress(block_group,
7082                                        num_bytes + empty_cluster + empty_size);
7083                                 goto have_block_group;
7084                         }
7085
7086                         /*
7087                          * at this point we either didn't find a cluster
7088                          * or we weren't able to allocate a block from our
7089                          * cluster.  Free the cluster we've been trying
7090                          * to use, and go to the next block group
7091                          */
7092                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
7093                         spin_unlock(&last_ptr->refill_lock);
7094                         goto loop;
7095                 }
7096
7097 unclustered_alloc:
7098                 spin_lock(&block_group->free_space_ctl->tree_lock);
7099                 if (cached &&
7100                     block_group->free_space_ctl->free_space <
7101                     num_bytes + empty_cluster + empty_size) {
7102                         if (block_group->free_space_ctl->free_space >
7103                             max_extent_size)
7104                                 max_extent_size =
7105                                         block_group->free_space_ctl->free_space;
7106                         spin_unlock(&block_group->free_space_ctl->tree_lock);
7107                         goto loop;
7108                 }
7109                 spin_unlock(&block_group->free_space_ctl->tree_lock);
7110
7111                 offset = btrfs_find_space_for_alloc(block_group, search_start,
7112                                                     num_bytes, empty_size,
7113                                                     &max_extent_size);
7114                 /*
7115                  * If we didn't find a chunk, and we haven't failed on this
7116                  * block group before, and this block group is in the middle of
7117                  * caching and we are ok with waiting, then go ahead and wait
7118                  * for progress to be made, and set failed_alloc to true.
7119                  *
7120                  * If failed_alloc is true then we've already waited on this
7121                  * block group once and should move on to the next block group.
7122                  */
7123                 if (!offset && !failed_alloc && !cached &&
7124                     loop > LOOP_CACHING_NOWAIT) {
7125                         wait_block_group_cache_progress(block_group,
7126                                                 num_bytes + empty_size);
7127                         failed_alloc = true;
7128                         goto have_block_group;
7129                 } else if (!offset) {
7130                         if (!cached)
7131                                 have_caching_bg = true;
7132                         goto loop;
7133                 }
7134 checks:
7135                 search_start = ALIGN(offset, root->stripesize);
7136
7137                 /* move on to the next group */
7138                 if (search_start + num_bytes >
7139                     block_group->key.objectid + block_group->key.offset) {
7140                         btrfs_add_free_space(block_group, offset, num_bytes);
7141                         goto loop;
7142                 }
7143
7144                 if (offset < search_start)
7145                         btrfs_add_free_space(block_group, offset,
7146                                              search_start - offset);
7147                 BUG_ON(offset > search_start);
7148
7149                 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7150                                                   alloc_type, delalloc);
7151                 if (ret == -EAGAIN) {
7152                         btrfs_add_free_space(block_group, offset, num_bytes);
7153                         goto loop;
7154                 }
7155
7156                 /* we are all good, let's return */
7157                 ins->objectid = search_start;
7158                 ins->offset = num_bytes;
7159
7160                 trace_btrfs_reserve_extent(orig_root, block_group,
7161                                            search_start, num_bytes);
7162                 btrfs_release_block_group(block_group, delalloc);
7163                 break;
7164 loop:
7165                 failed_cluster_refill = false;
7166                 failed_alloc = false;
7167                 BUG_ON(index != get_block_group_index(block_group));
7168                 btrfs_release_block_group(block_group, delalloc);
7169         }
7170         up_read(&space_info->groups_sem);
7171
7172         if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7173                 goto search;
7174
7175         if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7176                 goto search;
7177
7178         /*
7179          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7180          *                      caching kthreads as we move along
7181          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7182          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7183          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7184          *                      again
7185          */
7186         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7187                 index = 0;
7188                 loop++;
7189                 if (loop == LOOP_ALLOC_CHUNK) {
7190                         struct btrfs_trans_handle *trans;
7191                         int exist = 0;
7192
7193                         trans = current->journal_info;
7194                         if (trans)
7195                                 exist = 1;
7196                         else
7197                                 trans = btrfs_join_transaction(root);
7198
7199                         if (IS_ERR(trans)) {
7200                                 ret = PTR_ERR(trans);
7201                                 goto out;
7202                         }
7203
7204                         ret = do_chunk_alloc(trans, root, flags,
7205                                              CHUNK_ALLOC_FORCE);
7206                         /*
7207                          * Do not bail out on ENOSPC since we
7208                          * can do more things.
7209                          */
7210                         if (ret < 0 && ret != -ENOSPC)
7211                                 btrfs_abort_transaction(trans,
7212                                                         root, ret);
7213                         else
7214                                 ret = 0;
7215                         if (!exist)
7216                                 btrfs_end_transaction(trans, root);
7217                         if (ret)
7218                                 goto out;
7219                 }
7220
7221                 if (loop == LOOP_NO_EMPTY_SIZE) {
7222                         empty_size = 0;
7223                         empty_cluster = 0;
7224                 }
7225
7226                 goto search;
7227         } else if (!ins->objectid) {
7228                 ret = -ENOSPC;
7229         } else {
7230                 ret = 0;
7231         }
7232 out:
7233         if (ret == -ENOSPC)
7234                 ins->offset = max_extent_size;
7235         return ret;
7236 }
7237
7238 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7239                             int dump_block_groups)
7240 {
7241         struct btrfs_block_group_cache *cache;
7242         int index = 0;
7243
7244         spin_lock(&info->lock);
7245         printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7246                info->flags,
7247                info->total_bytes - info->bytes_used - info->bytes_pinned -
7248                info->bytes_reserved - info->bytes_readonly,
7249                (info->full) ? "" : "not ");
7250         printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7251                "reserved=%llu, may_use=%llu, readonly=%llu\n",
7252                info->total_bytes, info->bytes_used, info->bytes_pinned,
7253                info->bytes_reserved, info->bytes_may_use,
7254                info->bytes_readonly);
7255         spin_unlock(&info->lock);
7256
7257         if (!dump_block_groups)
7258                 return;
7259
7260         down_read(&info->groups_sem);
7261 again:
7262         list_for_each_entry(cache, &info->block_groups[index], list) {
7263                 spin_lock(&cache->lock);
7264                 printk(KERN_INFO "BTRFS: "
7265                            "block group %llu has %llu bytes, "
7266                            "%llu used %llu pinned %llu reserved %s\n",
7267                        cache->key.objectid, cache->key.offset,
7268                        btrfs_block_group_used(&cache->item), cache->pinned,
7269                        cache->reserved, cache->ro ? "[readonly]" : "");
7270                 btrfs_dump_free_space(cache, bytes);
7271                 spin_unlock(&cache->lock);
7272         }
7273         if (++index < BTRFS_NR_RAID_TYPES)
7274                 goto again;
7275         up_read(&info->groups_sem);
7276 }
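/*
 * Illustrative sample output (editor's addition; the values are made up):
 *
 *   BTRFS: space_info 4 has 33554432 free, is not full
 *   BTRFS: space_info total=1073741824, used=1040187392, pinned=0,
 *   reserved=0, may_use=0, readonly=0
 *   BTRFS: block group 13631488 has 8388608 bytes, 8126464 used 0 pinned
 *   0 reserved
 */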
7277
7278 int btrfs_reserve_extent(struct btrfs_root *root,
7279                          u64 num_bytes, u64 min_alloc_size,
7280                          u64 empty_size, u64 hint_byte,
7281                          struct btrfs_key *ins, int is_data, int delalloc)
7282 {
7283         bool final_tried = false;
7284         u64 flags;
7285         int ret;
7286
7287         flags = btrfs_get_alloc_profile(root, is_data);
7288 again:
7289         WARN_ON(num_bytes < root->sectorsize);
7290         ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7291                                flags, delalloc);
7292
7293         if (ret == -ENOSPC) {
7294                 if (!final_tried && ins->offset) {
7295                         num_bytes = min(num_bytes >> 1, ins->offset);
7296                         num_bytes = round_down(num_bytes, root->sectorsize);
7297                         num_bytes = max(num_bytes, min_alloc_size);
7298                         if (num_bytes == min_alloc_size)
7299                                 final_tried = true;
7300                         goto again;
7301                 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7302                         struct btrfs_space_info *sinfo;
7303
7304                         sinfo = __find_space_info(root->fs_info, flags);
7305                         btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7306                                 flags, num_bytes);
7307                         if (sinfo)
7308                                 dump_space_info(sinfo, num_bytes, 1);
7309                 }
7310         }
7311
7312         return ret;
7313 }
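/*
 * Illustrative worked example (editor's addition): say num_bytes = 8 MiB,
 * min_alloc_size = 64 KiB, and find_free_extent() fails with -ENOSPC
 * after seeing a largest free extent of 1 MiB (returned in ins->offset).
 * The retry above computes:
 *
 *   num_bytes = min(8M >> 1, 1M) = 1M, rounded down to the sectorsize;
 *   num_bytes = max(1M, 64K)     = 1M   -> try again
 *
 * and keeps shrinking on further failures until num_bytes reaches
 * min_alloc_size, at which point final_tried makes this the last attempt.
 */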
7314
7315 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7316                                         u64 start, u64 len,
7317                                         int pin, int delalloc)
7318 {
7319         struct btrfs_block_group_cache *cache;
7320         int ret = 0;
7321
7322         cache = btrfs_lookup_block_group(root->fs_info, start);
7323         if (!cache) {
7324                 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7325                         start);
7326                 return -ENOSPC;
7327         }
7328
7329         if (pin)
7330                 pin_down_extent(root, cache, start, len, 1);
7331         else {
7332                 if (btrfs_test_opt(root, DISCARD))
7333                         ret = btrfs_discard_extent(root, start, len, NULL);
7334                 btrfs_add_free_space(cache, start, len);
7335                 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7336         }
7337
7338         btrfs_put_block_group(cache);
7339
7340         trace_btrfs_reserved_extent_free(root, start, len);
7341
7342         return ret;
7343 }
7344
7345 int btrfs_free_reserved_extent(struct btrfs_root *root,
7346                                u64 start, u64 len, int delalloc)
7347 {
7348         return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7349 }
7350
7351 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7352                                        u64 start, u64 len)
7353 {
7354         return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7355 }
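/*
 * Editor's note (added): the two wrappers above differ only in the pin
 * flag.  btrfs_free_reserved_extent() returns the space to the free space
 * cache right away (optionally discarding it first), while
 * btrfs_free_and_pin_reserved_extent() pins the range so it only becomes
 * reusable once the current transaction commits.
 */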
7356
7357 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7358                                       struct btrfs_root *root,
7359                                       u64 parent, u64 root_objectid,
7360                                       u64 flags, u64 owner, u64 offset,
7361                                       struct btrfs_key *ins, int ref_mod)
7362 {
7363         int ret;
7364         struct btrfs_fs_info *fs_info = root->fs_info;
7365         struct btrfs_extent_item *extent_item;
7366         struct btrfs_extent_inline_ref *iref;
7367         struct btrfs_path *path;
7368         struct extent_buffer *leaf;
7369         int type;
7370         u32 size;
7371
7372         if (parent > 0)
7373                 type = BTRFS_SHARED_DATA_REF_KEY;
7374         else
7375                 type = BTRFS_EXTENT_DATA_REF_KEY;
7376
7377         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7378
7379         path = btrfs_alloc_path();
7380         if (!path)
7381                 return -ENOMEM;
7382
7383         path->leave_spinning = 1;
7384         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7385                                       ins, size);
7386         if (ret) {
7387                 btrfs_free_path(path);
7388                 return ret;
7389         }
7390
7391         leaf = path->nodes[0];
7392         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7393                                      struct btrfs_extent_item);
7394         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7395         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7396         btrfs_set_extent_flags(leaf, extent_item,
7397                                flags | BTRFS_EXTENT_FLAG_DATA);
7398
7399         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7400         btrfs_set_extent_inline_ref_type(leaf, iref, type);
7401         if (parent > 0) {
7402                 struct btrfs_shared_data_ref *ref;
7403                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7404                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7405                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7406         } else {
7407                 struct btrfs_extent_data_ref *ref;
7408                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7409                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7410                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7411                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7412                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7413         }
7414
7415         btrfs_mark_buffer_dirty(path->nodes[0]);
7416         btrfs_free_path(path);
7417
7418         ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7419         if (ret) { /* -ENOENT, logic error */
7420                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7421                         ins->objectid, ins->offset);
7422                 BUG();
7423         }
7424         trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7425         return ret;
7426 }
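/*
 * Editor's note (added): the item inserted above is packed inline.  For
 * the non-shared case (parent == 0) the layout is:
 *
 *   struct btrfs_extent_item        refs / generation / flags|FLAG_DATA
 *   struct btrfs_extent_inline_ref  type = BTRFS_EXTENT_DATA_REF_KEY
 *   struct btrfs_extent_data_ref    root / objectid / offset / count
 *
 * For the shared case the inline ref's offset holds the parent bytenr
 * and a struct btrfs_shared_data_ref holding the count follows instead.
 */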
7427
7428 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7429                                      struct btrfs_root *root,
7430                                      u64 parent, u64 root_objectid,
7431                                      u64 flags, struct btrfs_disk_key *key,
7432                                      int level, struct btrfs_key *ins,
7433                                      int no_quota)
7434 {
7435         int ret;
7436         struct btrfs_fs_info *fs_info = root->fs_info;
7437         struct btrfs_extent_item *extent_item;
7438         struct btrfs_tree_block_info *block_info;
7439         struct btrfs_extent_inline_ref *iref;
7440         struct btrfs_path *path;
7441         struct extent_buffer *leaf;
7442         u32 size = sizeof(*extent_item) + sizeof(*iref);
7443         u64 num_bytes = ins->offset;
7444         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7445                                                  SKINNY_METADATA);
7446
7447         if (!skinny_metadata)
7448                 size += sizeof(*block_info);
7449
7450         path = btrfs_alloc_path();
7451         if (!path) {
7452                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7453                                                    root->nodesize);
7454                 return -ENOMEM;
7455         }
7456
7457         path->leave_spinning = 1;
7458         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7459                                       ins, size);
7460         if (ret) {
7461                 btrfs_free_path(path);
7462                 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7463                                                    root->nodesize);
7464                 return ret;
7465         }
7466
7467         leaf = path->nodes[0];
7468         extent_item = btrfs_item_ptr(leaf, path->slots[0],
7469                                      struct btrfs_extent_item);
7470         btrfs_set_extent_refs(leaf, extent_item, 1);
7471         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7472         btrfs_set_extent_flags(leaf, extent_item,
7473                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7474
7475         if (skinny_metadata) {
7476                 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7477                 num_bytes = root->nodesize;
7478         } else {
7479                 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7480                 btrfs_set_tree_block_key(leaf, block_info, key);
7481                 btrfs_set_tree_block_level(leaf, block_info, level);
7482                 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7483         }
7484
7485         if (parent > 0) {
7486                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7487                 btrfs_set_extent_inline_ref_type(leaf, iref,
7488                                                  BTRFS_SHARED_BLOCK_REF_KEY);
7489                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7490         } else {
7491                 btrfs_set_extent_inline_ref_type(leaf, iref,
7492                                                  BTRFS_TREE_BLOCK_REF_KEY);
7493                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7494         }
7495
7496         btrfs_mark_buffer_dirty(leaf);
7497         btrfs_free_path(path);
7498
7499         ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7500                                  1);
7501         if (ret) { /* -ENOENT, logic error */
7502                 btrfs_err(fs_info, "update block group failed for %llu %llu",
7503                         ins->objectid, ins->offset);
7504                 BUG();
7505         }
7506
7507         trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7508         return ret;
7509 }
7510
7511 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7512                                      struct btrfs_root *root,
7513                                      u64 root_objectid, u64 owner,
7514                                      u64 offset, struct btrfs_key *ins)
7515 {
7516         int ret;
7517
7518         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7519
7520         ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7521                                          ins->offset, 0,
7522                                          root_objectid, owner, offset,
7523                                          BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
7524         return ret;
7525 }
7526
7527 /*
7528  * this is used by the tree logging recovery code.  It records that
7529  * an extent has been allocated and makes sure to clear the free
7530  * space cache bits as well
7531  */
7532 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7533                                    struct btrfs_root *root,
7534                                    u64 root_objectid, u64 owner, u64 offset,
7535                                    struct btrfs_key *ins)
7536 {
7537         int ret;
7538         struct btrfs_block_group_cache *block_group;
7539
7540         /*
7541          * Mixed block groups will exclude extents before processing the log, so
7542          * we only need to do the exclude dance if this fs isn't mixed.
7543          */
7544         if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7545                 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7546                 if (ret)
7547                         return ret;
7548         }
7549
7550         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7551         if (!block_group)
7552                 return -EINVAL;
7553
7554         ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7555                                           RESERVE_ALLOC_NO_ACCOUNT, 0);
7556         BUG_ON(ret); /* logic error */
7557         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7558                                          0, owner, offset, ins, 1);
7559         btrfs_put_block_group(block_group);
7560         return ret;
7561 }
7562
7563 static struct extent_buffer *
7564 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7565                       u64 bytenr, int level)
7566 {
7567         struct extent_buffer *buf;
7568
7569         buf = btrfs_find_create_tree_block(root, bytenr);
7570         if (!buf)
7571                 return ERR_PTR(-ENOMEM);
7572         btrfs_set_header_generation(buf, trans->transid);
7573         btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7574         btrfs_tree_lock(buf);
7575         clean_tree_block(trans, root->fs_info, buf);
7576         clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7577
7578         btrfs_set_lock_blocking(buf);
7579         btrfs_set_buffer_uptodate(buf);
7580
7581         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7582                 buf->log_index = root->log_transid % 2;
7583                 /*
7584                  * we allow two log transactions at a time; use different
7585                  * EXTENT bits to differentiate dirty pages.
7586                  */
7587                 if (buf->log_index == 0)
7588                         set_extent_dirty(&root->dirty_log_pages, buf->start,
7589                                         buf->start + buf->len - 1, GFP_NOFS);
7590                 else
7591                         set_extent_new(&root->dirty_log_pages, buf->start,
7592                                         buf->start + buf->len - 1, GFP_NOFS);
7593         } else {
7594                 buf->log_index = -1;
7595                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7596                          buf->start + buf->len - 1, GFP_NOFS);
7597         }
7598         trans->blocks_used++;
7599         /* this returns a buffer locked for blocking */
7600         return buf;
7601 }
7602
7603 static struct btrfs_block_rsv *
7604 use_block_rsv(struct btrfs_trans_handle *trans,
7605               struct btrfs_root *root, u32 blocksize)
7606 {
7607         struct btrfs_block_rsv *block_rsv;
7608         struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7609         int ret;
7610         bool global_updated = false;
7611
7612         block_rsv = get_block_rsv(trans, root);
7613
7614         if (unlikely(block_rsv->size == 0))
7615                 goto try_reserve;
7616 again:
7617         ret = block_rsv_use_bytes(block_rsv, blocksize);
7618         if (!ret)
7619                 return block_rsv;
7620
7621         if (block_rsv->failfast)
7622                 return ERR_PTR(ret);
7623
7624         if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7625                 global_updated = true;
7626                 update_global_block_rsv(root->fs_info);
7627                 goto again;
7628         }
7629
7630         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7631                 static DEFINE_RATELIMIT_STATE(_rs,
7632                                 DEFAULT_RATELIMIT_INTERVAL * 10,
7633                                 /*DEFAULT_RATELIMIT_BURST*/ 1);
7634                 if (__ratelimit(&_rs))
7635                         WARN(1, KERN_DEBUG
7636                                 "BTRFS: block rsv returned %d\n", ret);
7637         }
7638 try_reserve:
7639         ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7640                                      BTRFS_RESERVE_NO_FLUSH);
7641         if (!ret)
7642                 return block_rsv;
7643         /*
7644          * If we couldn't reserve metadata bytes, try to use some from
7645          * the global reserve if its space type is the same as the global
7646          * reservation.
7647          */
7648         if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7649             block_rsv->space_info == global_rsv->space_info) {
7650                 ret = block_rsv_use_bytes(global_rsv, blocksize);
7651                 if (!ret)
7652                         return global_rsv;
7653         }
7654         return ERR_PTR(ret);
7655 }
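/*
 * Editor's note (added): use_block_rsv() tries, in order: the
 * transaction's own block reserve; the same reserve again after
 * refreshing the global reserve (global type only); a fresh no-flush
 * metadata reservation; and finally the global reserve itself, when the
 * space_info matches and the rsv isn't already the global one.
 */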
7656
7657 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7658                             struct btrfs_block_rsv *block_rsv, u32 blocksize)
7659 {
7660         block_rsv_add_bytes(block_rsv, blocksize, 0);
7661         block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7662 }
7663
7664 /*
7665  * finds a free extent and does all the dirty work required for allocation
7666  * returns the tree buffer or an ERR_PTR on error.
7667  */
7668 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7669                                         struct btrfs_root *root,
7670                                         u64 parent, u64 root_objectid,
7671                                         struct btrfs_disk_key *key, int level,
7672                                         u64 hint, u64 empty_size)
7673 {
7674         struct btrfs_key ins;
7675         struct btrfs_block_rsv *block_rsv;
7676         struct extent_buffer *buf;
7677         struct btrfs_delayed_extent_op *extent_op;
7678         u64 flags = 0;
7679         int ret;
7680         u32 blocksize = root->nodesize;
7681         bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7682                                                  SKINNY_METADATA);
7683
7684         if (btrfs_test_is_dummy_root(root)) {
7685                 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7686                                             level);
7687                 if (!IS_ERR(buf))
7688                         root->alloc_bytenr += blocksize;
7689                 return buf;
7690         }
7691
7692         block_rsv = use_block_rsv(trans, root, blocksize);
7693         if (IS_ERR(block_rsv))
7694                 return ERR_CAST(block_rsv);
7695
7696         ret = btrfs_reserve_extent(root, blocksize, blocksize,
7697                                    empty_size, hint, &ins, 0, 0);
7698         if (ret)
7699                 goto out_unuse;
7700
7701         buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7702         if (IS_ERR(buf)) {
7703                 ret = PTR_ERR(buf);
7704                 goto out_free_reserved;
7705         }
7706
7707         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
7708                 if (parent == 0)
7709                         parent = ins.objectid;
7710                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
7711         } else
7712                 BUG_ON(parent > 0);
7713
7714         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
7715                 extent_op = btrfs_alloc_delayed_extent_op();
7716                 if (!extent_op) {
7717                         ret = -ENOMEM;
7718                         goto out_free_buf;
7719                 }
7720                 if (key)
7721                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
7722                 else
7723                         memset(&extent_op->key, 0, sizeof(extent_op->key));
7724                 extent_op->flags_to_set = flags;
7725                 if (skinny_metadata)
7726                         extent_op->update_key = 0;
7727                 else
7728                         extent_op->update_key = 1;
7729                 extent_op->update_flags = 1;
7730                 extent_op->is_data = 0;
7731                 extent_op->level = level;
7732
7733                 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
7734                                                  ins.objectid, ins.offset,
7735                                                  parent, root_objectid, level,
7736                                                  BTRFS_ADD_DELAYED_EXTENT,
7737                                                  extent_op, 0);
7738                 if (ret)
7739                         goto out_free_delayed;
7740         }
7741         return buf;
7742
7743 out_free_delayed:
7744         btrfs_free_delayed_extent_op(extent_op);
7745 out_free_buf:
7746         free_extent_buffer(buf);
7747 out_free_reserved:
7748         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
7749 out_unuse:
7750         unuse_block_rsv(root->fs_info, block_rsv, blocksize);
7751         return ERR_PTR(ret);
7752 }
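/*
 * Editor's note (added): the error unwinding above releases resources in
 * reverse order of acquisition: the delayed extent op, then the buffer,
 * then the reserved extent, then the block reservation.
 */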
7753
7754 struct walk_control {
7755         u64 refs[BTRFS_MAX_LEVEL];
7756         u64 flags[BTRFS_MAX_LEVEL];
7757         struct btrfs_key update_progress;
7758         int stage;
7759         int level;
7760         int shared_level;
7761         int update_ref;
7762         int keep_locks;
7763         int reada_slot;
7764         int reada_count;
7765         int for_reloc;
7766 };
7767
7768 #define DROP_REFERENCE  1
7769 #define UPDATE_BACKREF  2
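/*
 * Editor's note (added): walk_control->stage takes one of the two values
 * above.  DROP_REFERENCE is the normal deletion walk that drops our
 * reference on each block; UPDATE_BACKREF is a preparatory walk that
 * converts implicit back references on shared blocks into explicit full
 * backrefs before they are dropped.
 */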
7770
7771 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
7772                                      struct btrfs_root *root,
7773                                      struct walk_control *wc,
7774                                      struct btrfs_path *path)
7775 {
7776         u64 bytenr;
7777         u64 generation;
7778         u64 refs;
7779         u64 flags;
7780         u32 nritems;
7781         u32 blocksize;
7782         struct btrfs_key key;
7783         struct extent_buffer *eb;
7784         int ret;
7785         int slot;
7786         int nread = 0;
7787
7788         if (path->slots[wc->level] < wc->reada_slot) {
7789                 wc->reada_count = wc->reada_count * 2 / 3;
7790                 wc->reada_count = max(wc->reada_count, 2);
7791         } else {
7792                 wc->reada_count = wc->reada_count * 3 / 2;
7793                 wc->reada_count = min_t(int, wc->reada_count,
7794                                         BTRFS_NODEPTRS_PER_BLOCK(root));
7795         }
7796
7797         eb = path->nodes[wc->level];
7798         nritems = btrfs_header_nritems(eb);
7799         blocksize = root->nodesize;
7800
7801         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
7802                 if (nread >= wc->reada_count)
7803                         break;
7804
7805                 cond_resched();
7806                 bytenr = btrfs_node_blockptr(eb, slot);
7807                 generation = btrfs_node_ptr_generation(eb, slot);
7808
7809                 if (slot == path->slots[wc->level])
7810                         goto reada;
7811
7812                 if (wc->stage == UPDATE_BACKREF &&
7813                     generation <= root->root_key.offset)
7814                         continue;
7815
7816                 /* We don't lock the tree block, it's OK to be racy here */
7817                 ret = btrfs_lookup_extent_info(trans, root, bytenr,
7818                                                wc->level - 1, 1, &refs,
7819                                                &flags);
7820                 /* We don't care about errors in readahead. */
7821                 if (ret < 0)
7822                         continue;
7823                 BUG_ON(refs == 0);
7824
7825                 if (wc->stage == DROP_REFERENCE) {
7826                         if (refs == 1)
7827                                 goto reada;
7828
7829                         if (wc->level == 1 &&
7830                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7831                                 continue;
7832                         if (!wc->update_ref ||
7833                             generation <= root->root_key.offset)
7834                                 continue;
7835                         btrfs_node_key_to_cpu(eb, &key, slot);
7836                         ret = btrfs_comp_cpu_keys(&key,
7837                                                   &wc->update_progress);
7838                         if (ret < 0)
7839                                 continue;
7840                 } else {
7841                         if (wc->level == 1 &&
7842                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
7843                                 continue;
7844                 }
7845 reada:
7846                 readahead_tree_block(root, bytenr);
7847                 nread++;
7848         }
7849         wc->reada_slot = slot;
7850 }
7851
7852 /*
7853  * TODO: Modify the related functions to add the relevant node/leaf to
7854  * dirty_extent_root for later qgroup accounting.
7855  *
7856  * Currently, this function does nothing.
7857  */
7858 static int account_leaf_items(struct btrfs_trans_handle *trans,
7859                               struct btrfs_root *root,
7860                               struct extent_buffer *eb)
7861 {
7862         int nr = btrfs_header_nritems(eb);
7863         int i, extent_type;
7864         struct btrfs_key key;
7865         struct btrfs_file_extent_item *fi;
7866         u64 bytenr, num_bytes;
7867
7868         for (i = 0; i < nr; i++) {
7869                 btrfs_item_key_to_cpu(eb, &key, i);
7870
7871                 if (key.type != BTRFS_EXTENT_DATA_KEY)
7872                         continue;
7873
7874                 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
7875                 /* filter out non-qgroup-accountable extents */
7876                 extent_type = btrfs_file_extent_type(eb, fi);
7877
7878                 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
7879                         continue;
7880
7881                 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
7882                 if (!bytenr)
7883                         continue;
7884
7885                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
7886         }
7887         return 0;
7888 }
7889
7890 /*
7891  * Walk up the tree from the bottom, freeing leaves and any interior
7892  * nodes which have had all slots visited. If a node (leaf or
7893  * interior) is freed, the node above it will have its slot
7894  * incremented. The root node will never be freed.
7895  *
7896  * At the end of this function, we should have a path which has all
7897  * slots incremented to the next position for a search. If we need to
7898  * read a new node it will be NULL and the node above it will have the
7899  * correct slot selected for a later read.
7900  *
7901  * If we increment the root nodes slot counter past the number of
7902  * elements, 1 is returned to signal completion of the search.
7903  */
7904 static int adjust_slots_upwards(struct btrfs_root *root,
7905                                 struct btrfs_path *path, int root_level)
7906 {
7907         int level = 0;
7908         int nr, slot;
7909         struct extent_buffer *eb;
7910
7911         if (root_level == 0)
7912                 return 1;
7913
7914         while (level <= root_level) {
7915                 eb = path->nodes[level];
7916                 nr = btrfs_header_nritems(eb);
7917                 path->slots[level]++;
7918                 slot = path->slots[level];
7919                 if (slot >= nr || level == 0) {
7920                         /*
7921                          * Don't free the root - we will detect this
7922                          * condition after our loop and return a
7923                          * positive value for caller to stop walking the tree.
7924                          */
7925                         if (level != root_level) {
7926                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
7927                                 path->locks[level] = 0;
7928
7929                                 free_extent_buffer(eb);
7930                                 path->nodes[level] = NULL;
7931                                 path->slots[level] = 0;
7932                         }
7933                 } else {
7934                         /*
7935                          * We have a valid slot to walk back down
7936                          * from. Stop here so caller can process these
7937                          * new nodes.
7938                          */
7939                         break;
7940                 }
7941
7942                 level++;
7943         }
7944
7945         eb = path->nodes[root_level];
7946         if (path->slots[root_level] >= btrfs_header_nritems(eb))
7947                 return 1;
7948
7949         return 0;
7950 }
7951
7952 /*
7953  * root_eb is the subtree root and is locked before this function is called.
7954  * TODO: Modify this function to mark all blocks (including completely shared
7955  * nodes) in dirty_extent_root so they get accounted in qgroup.
7956  */
7957 static int account_shared_subtree(struct btrfs_trans_handle *trans,
7958                                   struct btrfs_root *root,
7959                                   struct extent_buffer *root_eb,
7960                                   u64 root_gen,
7961                                   int root_level)
7962 {
7963         int ret = 0;
7964         int level;
7965         struct extent_buffer *eb = root_eb;
7966         struct btrfs_path *path = NULL;
7967
7968         BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
7969         BUG_ON(root_eb == NULL);
7970
7971         if (!root->fs_info->quota_enabled)
7972                 return 0;
7973
7974         if (!extent_buffer_uptodate(root_eb)) {
7975                 ret = btrfs_read_buffer(root_eb, root_gen);
7976                 if (ret)
7977                         goto out;
7978         }
7979
7980         if (root_level == 0) {
7981                 ret = account_leaf_items(trans, root, root_eb);
7982                 goto out;
7983         }
7984
7985         path = btrfs_alloc_path();
7986         if (!path)
7987                 return -ENOMEM;
7988
7989         /*
7990          * Walk down the tree.  Missing extent blocks are filled in as
7991          * we go. Metadata is accounted every time we read a new
7992          * extent block.
7993          *
7994          * When we reach a leaf, we account for file extent items in it,
7995          * walk back up the tree (adjusting slot pointers as we go)
7996          * and restart the search process.
7997          */
7998         extent_buffer_get(root_eb); /* For path */
7999         path->nodes[root_level] = root_eb;
8000         path->slots[root_level] = 0;
8001         path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
8002 walk_down:
8003         level = root_level;
8004         while (level >= 0) {
8005                 if (path->nodes[level] == NULL) {
8006                         int parent_slot;
8007                         u64 child_gen;
8008                         u64 child_bytenr;
8009
8010                         /* We need to get child blockptr/gen from
8011                          * parent before we can read it. */
8012                         eb = path->nodes[level + 1];
8013                         parent_slot = path->slots[level + 1];
8014                         child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8015                         child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8016
8017                         eb = read_tree_block(root, child_bytenr, child_gen);
8018                         if (IS_ERR(eb)) {
8019                                 ret = PTR_ERR(eb);
8020                                 goto out;
8021                         } else if (!extent_buffer_uptodate(eb)) {
8022                                 free_extent_buffer(eb);
8023                                 ret = -EIO;
8024                                 goto out;
8025                         }
8026
8027                         path->nodes[level] = eb;
8028                         path->slots[level] = 0;
8029
8030                         btrfs_tree_read_lock(eb);
8031                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8032                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8033                 }
8034
8035                 if (level == 0) {
8036                         ret = account_leaf_items(trans, root, path->nodes[level]);
8037                         if (ret)
8038                                 goto out;
8039
8040                         /* Nonzero return here means we completed our search */
8041                         ret = adjust_slots_upwards(root, path, root_level);
8042                         if (ret)
8043                                 break;
8044
8045                         /* Restart search with new slots */
8046                         goto walk_down;
8047                 }
8048
8049                 level--;
8050         }
8051
8052         ret = 0;
8053 out:
8054         btrfs_free_path(path);
8055
8056         return ret;
8057 }
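/*
 * Usage sketch (taken from the call site in do_walk_down() below): this is
 * invoked when a shared subtree is about to have its reference dropped,
 * roughly:
 *
 *	ret = account_shared_subtree(trans, root, next, generation, level - 1);
 *	if (ret)
 *		printk_ratelimited(...);  /- quota out of sync, rescan required
 */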
8058
8059 /*
8060  * helper to process tree block while walking down the tree.
8061  *
8062  * when wc->stage == UPDATE_BACKREF, this function updates
8063  * back refs for pointers in the block.
8064  *
8065  * NOTE: return value 1 means we should stop walking down.
8066  */
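/*
 * In short (descriptive note, not part of the original file): under
 * DROP_REFERENCE a block with more than one reference stops the descent
 * (return 1); under UPDATE_BACKREF a block not yet flagged FULL_BACKREF
 * has its child references re-added as full backrefs and the flag set
 * before the walk continues downwards.
 */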
8067 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8068                                    struct btrfs_root *root,
8069                                    struct btrfs_path *path,
8070                                    struct walk_control *wc, int lookup_info)
8071 {
8072         int level = wc->level;
8073         struct extent_buffer *eb = path->nodes[level];
8074         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8075         int ret;
8076
8077         if (wc->stage == UPDATE_BACKREF &&
8078             btrfs_header_owner(eb) != root->root_key.objectid)
8079                 return 1;
8080
8081         /*
8082          * when the reference count of a tree block is 1, it won't increase
8083          * again. once the full backref flag is set, we never clear it.
8084          */
8085         if (lookup_info &&
8086             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8087              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8088                 BUG_ON(!path->locks[level]);
8089                 ret = btrfs_lookup_extent_info(trans, root,
8090                                                eb->start, level, 1,
8091                                                &wc->refs[level],
8092                                                &wc->flags[level]);
8093                 BUG_ON(ret == -ENOMEM);
8094                 if (ret)
8095                         return ret;
8096                 BUG_ON(wc->refs[level] == 0);
8097         }
8098
8099         if (wc->stage == DROP_REFERENCE) {
8100                 if (wc->refs[level] > 1)
8101                         return 1;
8102
8103                 if (path->locks[level] && !wc->keep_locks) {
8104                         btrfs_tree_unlock_rw(eb, path->locks[level]);
8105                         path->locks[level] = 0;
8106                 }
8107                 return 0;
8108         }
8109
8110         /* wc->stage == UPDATE_BACKREF */
8111         if (!(wc->flags[level] & flag)) {
8112                 BUG_ON(!path->locks[level]);
8113                 ret = btrfs_inc_ref(trans, root, eb, 1);
8114                 BUG_ON(ret); /* -ENOMEM */
8115                 ret = btrfs_dec_ref(trans, root, eb, 0);
8116                 BUG_ON(ret); /* -ENOMEM */
8117                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8118                                                   eb->len, flag,
8119                                                   btrfs_header_level(eb), 0);
8120                 BUG_ON(ret); /* -ENOMEM */
8121                 wc->flags[level] |= flag;
8122         }
8123
8124         /*
8125          * the block is shared by multiple trees, so it's not good to
8126          * keep the tree lock
8127          */
8128         if (path->locks[level] && level > 0) {
8129                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8130                 path->locks[level] = 0;
8131         }
8132         return 0;
8133 }
8134
8135 /*
8136  * helper to process tree block pointer.
8137  *
8138  * when wc->stage == DROP_REFERENCE, this function checks the
8139  * reference count of the block pointed to. if the block
8140  * is shared and we need to update back refs for the subtree
8141  * rooted at the block, this function changes wc->stage to
8142  * UPDATE_BACKREF. if the block is shared and there is no
8143  * need to update back refs, this function drops the reference
8144  * to the block.
8145  *
8146  * NOTE: return value 1 means we should stop walking down.
8147  */
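/*
 * Decision summary for DROP_REFERENCE on a shared child (descriptive note,
 * not part of the original file):
 *
 *	- if the parent already carries FULL_BACKREF at level 1, or the
 *	  child predates the snapshot, or update_ref is unset: skip the
 *	  subtree and only drop this one reference;
 *	- if the child's key is still behind wc->update_progress: likewise
 *	  skip;
 *	- otherwise switch wc->stage to UPDATE_BACKREF and descend.
 */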
8148 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8149                                  struct btrfs_root *root,
8150                                  struct btrfs_path *path,
8151                                  struct walk_control *wc, int *lookup_info)
8152 {
8153         u64 bytenr;
8154         u64 generation;
8155         u64 parent;
8156         u32 blocksize;
8157         struct btrfs_key key;
8158         struct extent_buffer *next;
8159         int level = wc->level;
8160         int reada = 0;
8161         int ret = 0;
8162         bool need_account = false;
8163
8164         generation = btrfs_node_ptr_generation(path->nodes[level],
8165                                                path->slots[level]);
8166         /*
8167          * if the lower level block was created before the snapshot
8168          * was created, we know there is no need to update back refs
8169          * for the subtree
8170          */
8171         if (wc->stage == UPDATE_BACKREF &&
8172             generation <= root->root_key.offset) {
8173                 *lookup_info = 1;
8174                 return 1;
8175         }
8176
8177         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8178         blocksize = root->nodesize;
8179
8180         next = btrfs_find_tree_block(root->fs_info, bytenr);
8181         if (!next) {
8182                 next = btrfs_find_create_tree_block(root, bytenr);
8183                 if (!next)
8184                         return -ENOMEM;
8185                 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8186                                                level - 1);
8187                 reada = 1;
8188         }
8189         btrfs_tree_lock(next);
8190         btrfs_set_lock_blocking(next);
8191
8192         ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8193                                        &wc->refs[level - 1],
8194                                        &wc->flags[level - 1]);
8195         if (ret < 0) {
8196                 btrfs_tree_unlock(next);
8197                 return ret;
8198         }
8199
8200         if (unlikely(wc->refs[level - 1] == 0)) {
8201                 btrfs_err(root->fs_info, "Missing references.");
8202                 BUG();
8203         }
8204         *lookup_info = 0;
8205
8206         if (wc->stage == DROP_REFERENCE) {
8207                 if (wc->refs[level - 1] > 1) {
8208                         need_account = true;
8209                         if (level == 1 &&
8210                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8211                                 goto skip;
8212
8213                         if (!wc->update_ref ||
8214                             generation <= root->root_key.offset)
8215                                 goto skip;
8216
8217                         btrfs_node_key_to_cpu(path->nodes[level], &key,
8218                                               path->slots[level]);
8219                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8220                         if (ret < 0)
8221                                 goto skip;
8222
8223                         wc->stage = UPDATE_BACKREF;
8224                         wc->shared_level = level - 1;
8225                 }
8226         } else {
8227                 if (level == 1 &&
8228                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8229                         goto skip;
8230         }
8231
8232         if (!btrfs_buffer_uptodate(next, generation, 0)) {
8233                 btrfs_tree_unlock(next);
8234                 free_extent_buffer(next);
8235                 next = NULL;
8236                 *lookup_info = 1;
8237         }
8238
8239         if (!next) {
8240                 if (reada && level == 1)
8241                         reada_walk_down(trans, root, wc, path);
8242                 next = read_tree_block(root, bytenr, generation);
8243                 if (IS_ERR(next)) {
8244                         return PTR_ERR(next);
8245                 } else if (!extent_buffer_uptodate(next)) {
8246                         free_extent_buffer(next);
8247                         return -EIO;
8248                 }
8249                 btrfs_tree_lock(next);
8250                 btrfs_set_lock_blocking(next);
8251         }
8252
8253         level--;
8254         BUG_ON(level != btrfs_header_level(next));
8255         path->nodes[level] = next;
8256         path->slots[level] = 0;
8257         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8258         wc->level = level;
8259         if (wc->level == 1)
8260                 wc->reada_slot = 0;
8261         return 0;
8262 skip:
8263         wc->refs[level - 1] = 0;
8264         wc->flags[level - 1] = 0;
8265         if (wc->stage == DROP_REFERENCE) {
8266                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8267                         parent = path->nodes[level]->start;
8268                 } else {
8269                         BUG_ON(root->root_key.objectid !=
8270                                btrfs_header_owner(path->nodes[level]));
8271                         parent = 0;
8272                 }
8273
8274                 if (need_account) {
8275                         ret = account_shared_subtree(trans, root, next,
8276                                                      generation, level - 1);
8277                         if (ret) {
8278                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8279                                         "%d accounting shared subtree. Quota "
8280                                         "is out of sync, rescan required.\n",
8281                                         root->fs_info->sb->s_id, ret);
8282                         }
8283                 }
8284                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8285                                 root->root_key.objectid, level - 1, 0, 0);
8286                 BUG_ON(ret); /* -ENOMEM */
8287         }
8288         btrfs_tree_unlock(next);
8289         free_extent_buffer(next);
8290         *lookup_info = 1;
8291         return 1;
8292 }
8293
8294 /*
8295  * helper to process tree block while walking up the tree.
8296  *
8297  * when wc->stage == DROP_REFERENCE, this function drops
8298  * reference count on the block.
8299  *
8300  * when wc->stage == UPDATE_BACKREF, this function changes
8301  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8302  * to UPDATE_BACKREF previously while processing the block.
8303  *
8304  * NOTE: return value 1 means we should stop walking up.
8305  */
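/*
 * Note (descriptive, not part of the original file): when the walk climbs
 * back up to wc->shared_level, the UPDATE_BACKREF pass for that subtree is
 * done, so the stage flips back to DROP_REFERENCE and the reference count
 * is re-checked before deciding whether to descend again and free blocks.
 */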
8306 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8307                                  struct btrfs_root *root,
8308                                  struct btrfs_path *path,
8309                                  struct walk_control *wc)
8310 {
8311         int ret;
8312         int level = wc->level;
8313         struct extent_buffer *eb = path->nodes[level];
8314         u64 parent = 0;
8315
8316         if (wc->stage == UPDATE_BACKREF) {
8317                 BUG_ON(wc->shared_level < level);
8318                 if (level < wc->shared_level)
8319                         goto out;
8320
8321                 ret = find_next_key(path, level + 1, &wc->update_progress);
8322                 if (ret > 0)
8323                         wc->update_ref = 0;
8324
8325                 wc->stage = DROP_REFERENCE;
8326                 wc->shared_level = -1;
8327                 path->slots[level] = 0;
8328
8329                 /*
8330                  * check reference count again if the block isn't locked.
8331                  * we should start walking down the tree again if reference
8332                  * count is one.
8333                  */
8334                 if (!path->locks[level]) {
8335                         BUG_ON(level == 0);
8336                         btrfs_tree_lock(eb);
8337                         btrfs_set_lock_blocking(eb);
8338                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8339
8340                         ret = btrfs_lookup_extent_info(trans, root,
8341                                                        eb->start, level, 1,
8342                                                        &wc->refs[level],
8343                                                        &wc->flags[level]);
8344                         if (ret < 0) {
8345                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8346                                 path->locks[level] = 0;
8347                                 return ret;
8348                         }
8349                         BUG_ON(wc->refs[level] == 0);
8350                         if (wc->refs[level] == 1) {
8351                                 btrfs_tree_unlock_rw(eb, path->locks[level]);
8352                                 path->locks[level] = 0;
8353                                 return 1;
8354                         }
8355                 }
8356         }
8357
8358         /* wc->stage == DROP_REFERENCE */
8359         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8360
8361         if (wc->refs[level] == 1) {
8362                 if (level == 0) {
8363                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8364                                 ret = btrfs_dec_ref(trans, root, eb, 1);
8365                         else
8366                                 ret = btrfs_dec_ref(trans, root, eb, 0);
8367                         BUG_ON(ret); /* -ENOMEM */
8368                         ret = account_leaf_items(trans, root, eb);
8369                         if (ret) {
8370                                 printk_ratelimited(KERN_ERR "BTRFS: %s Error "
8371                                         "%d accounting leaf items. Quota "
8372                                         "is out of sync, rescan required.\n",
8373                                         root->fs_info->sb->s_id, ret);
8374                         }
8375                 }
8376                 /* make block locked assertion in clean_tree_block happy */
8377                 if (!path->locks[level] &&
8378                     btrfs_header_generation(eb) == trans->transid) {
8379                         btrfs_tree_lock(eb);
8380                         btrfs_set_lock_blocking(eb);
8381                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8382                 }
8383                 clean_tree_block(trans, root->fs_info, eb);
8384         }
8385
8386         if (eb == root->node) {
8387                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8388                         parent = eb->start;
8389                 else
8390                         BUG_ON(root->root_key.objectid !=
8391                                btrfs_header_owner(eb));
8392         } else {
8393                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8394                         parent = path->nodes[level + 1]->start;
8395                 else
8396                         BUG_ON(root->root_key.objectid !=
8397                                btrfs_header_owner(path->nodes[level + 1]));
8398         }
8399
8400         btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8401 out:
8402         wc->refs[level] = 0;
8403         wc->flags[level] = 0;
8404         return 0;
8405 }
8406
8407 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8408                                    struct btrfs_root *root,
8409                                    struct btrfs_path *path,
8410                                    struct walk_control *wc)
8411 {
8412         int level = wc->level;
8413         int lookup_info = 1;
8414         int ret;
8415
8416         while (level >= 0) {
8417                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8418                 if (ret > 0)
8419                         break;
8420
8421                 if (level == 0)
8422                         break;
8423
8424                 if (path->slots[level] >=
8425                     btrfs_header_nritems(path->nodes[level]))
8426                         break;
8427
8428                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8429                 if (ret > 0) {
8430                         path->slots[level]++;
8431                         continue;
8432                 } else if (ret < 0)
8433                         return ret;
8434                 level = wc->level;
8435         }
8436         return 0;
8437 }
8438
8439 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8440                                  struct btrfs_root *root,
8441                                  struct btrfs_path *path,
8442                                  struct walk_control *wc, int max_level)
8443 {
8444         int level = wc->level;
8445         int ret;
8446
8447         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8448         while (level < max_level && path->nodes[level]) {
8449                 wc->level = level;
8450                 if (path->slots[level] + 1 <
8451                     btrfs_header_nritems(path->nodes[level])) {
8452                         path->slots[level]++;
8453                         return 0;
8454                 } else {
8455                         ret = walk_up_proc(trans, root, path, wc);
8456                         if (ret > 0)
8457                                 return 0;
8458
8459                         if (path->locks[level]) {
8460                                 btrfs_tree_unlock_rw(path->nodes[level],
8461                                                      path->locks[level]);
8462                                 path->locks[level] = 0;
8463                         }
8464                         free_extent_buffer(path->nodes[level]);
8465                         path->nodes[level] = NULL;
8466                         level++;
8467                 }
8468         }
8469         return 1;
8470 }
8471
8472 /*
8473  * drop a subvolume tree.
8474  *
8475  * this function traverses the tree freeing any blocks that are only
8476  * referenced by the tree.
8477  *
8478  * when a shared tree block is found, this function decreases its
8479  * reference count by one. if update_ref is true, this function
8480  * also makes sure backrefs for the shared block and all lower level
8481  * blocks are properly updated.
8482  *
8483  * If called with for_reloc == 0, may exit early with -EAGAIN
8484  */
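/*
 * Usage sketch (illustrative; assuming the cleaner-thread call site of this
 * kernel version): deleted subvolumes are dropped roughly like
 *
 *	if (btrfs_header_backref_rev(root->node) < BTRFS_MIXED_BACKREF_REV)
 *		err = btrfs_drop_snapshot(root, NULL, 0, 0);
 *	else
 *		err = btrfs_drop_snapshot(root, NULL, 1, 0);
 *
 * with update_ref set only for trees using the newer mixed backref scheme.
 */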
8485 int btrfs_drop_snapshot(struct btrfs_root *root,
8486                          struct btrfs_block_rsv *block_rsv, int update_ref,
8487                          int for_reloc)
8488 {
8489         struct btrfs_path *path;
8490         struct btrfs_trans_handle *trans;
8491         struct btrfs_root *tree_root = root->fs_info->tree_root;
8492         struct btrfs_root_item *root_item = &root->root_item;
8493         struct walk_control *wc;
8494         struct btrfs_key key;
8495         int err = 0;
8496         int ret;
8497         int level;
8498         bool root_dropped = false;
8499
8500         btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8501
8502         path = btrfs_alloc_path();
8503         if (!path) {
8504                 err = -ENOMEM;
8505                 goto out;
8506         }
8507
8508         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8509         if (!wc) {
8510                 btrfs_free_path(path);
8511                 err = -ENOMEM;
8512                 goto out;
8513         }
8514
8515         trans = btrfs_start_transaction(tree_root, 0);
8516         if (IS_ERR(trans)) {
8517                 err = PTR_ERR(trans);
8518                 goto out_free;
8519         }
8520
8521         if (block_rsv)
8522                 trans->block_rsv = block_rsv;
8523
8524         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8525                 level = btrfs_header_level(root->node);
8526                 path->nodes[level] = btrfs_lock_root_node(root);
8527                 btrfs_set_lock_blocking(path->nodes[level]);
8528                 path->slots[level] = 0;
8529                 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8530                 memset(&wc->update_progress, 0,
8531                        sizeof(wc->update_progress));
8532         } else {
8533                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8534                 memcpy(&wc->update_progress, &key,
8535                        sizeof(wc->update_progress));
8536
8537                 level = root_item->drop_level;
8538                 BUG_ON(level == 0);
8539                 path->lowest_level = level;
8540                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8541                 path->lowest_level = 0;
8542                 if (ret < 0) {
8543                         err = ret;
8544                         goto out_end_trans;
8545                 }
8546                 WARN_ON(ret > 0);
8547
8548                 /*
8549                  * unlock our path, this is safe because only this
8550                  * function is allowed to delete this snapshot
8551                  */
8552                 btrfs_unlock_up_safe(path, 0);
8553
8554                 level = btrfs_header_level(root->node);
8555                 while (1) {
8556                         btrfs_tree_lock(path->nodes[level]);
8557                         btrfs_set_lock_blocking(path->nodes[level]);
8558                         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8559
8560                         ret = btrfs_lookup_extent_info(trans, root,
8561                                                 path->nodes[level]->start,
8562                                                 level, 1, &wc->refs[level],
8563                                                 &wc->flags[level]);
8564                         if (ret < 0) {
8565                                 err = ret;
8566                                 goto out_end_trans;
8567                         }
8568                         BUG_ON(wc->refs[level] == 0);
8569
8570                         if (level == root_item->drop_level)
8571                                 break;
8572
8573                         btrfs_tree_unlock(path->nodes[level]);
8574                         path->locks[level] = 0;
8575                         WARN_ON(wc->refs[level] != 1);
8576                         level--;
8577                 }
8578         }
8579
8580         wc->level = level;
8581         wc->shared_level = -1;
8582         wc->stage = DROP_REFERENCE;
8583         wc->update_ref = update_ref;
8584         wc->keep_locks = 0;
8585         wc->for_reloc = for_reloc;
8586         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8587
8588         while (1) {
8589
8590                 ret = walk_down_tree(trans, root, path, wc);
8591                 if (ret < 0) {
8592                         err = ret;
8593                         break;
8594                 }
8595
8596                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8597                 if (ret < 0) {
8598                         err = ret;
8599                         break;
8600                 }
8601
8602                 if (ret > 0) {
8603                         BUG_ON(wc->stage != DROP_REFERENCE);
8604                         break;
8605                 }
8606
8607                 if (wc->stage == DROP_REFERENCE) {
8608                         level = wc->level;
8609                         btrfs_node_key(path->nodes[level],
8610                                        &root_item->drop_progress,
8611                                        path->slots[level]);
8612                         root_item->drop_level = level;
8613                 }
8614
8615                 BUG_ON(wc->level == 0);
8616                 if (btrfs_should_end_transaction(trans, tree_root) ||
8617                     (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8618                         ret = btrfs_update_root(trans, tree_root,
8619                                                 &root->root_key,
8620                                                 root_item);
8621                         if (ret) {
8622                                 btrfs_abort_transaction(trans, tree_root, ret);
8623                                 err = ret;
8624                                 goto out_end_trans;
8625                         }
8626
8627                         btrfs_end_transaction_throttle(trans, tree_root);
8628                         if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8629                                 pr_debug("BTRFS: drop snapshot early exit\n");
8630                                 err = -EAGAIN;
8631                                 goto out_free;
8632                         }
8633
8634                         trans = btrfs_start_transaction(tree_root, 0);
8635                         if (IS_ERR(trans)) {
8636                                 err = PTR_ERR(trans);
8637                                 goto out_free;
8638                         }
8639                         if (block_rsv)
8640                                 trans->block_rsv = block_rsv;
8641                 }
8642         }
8643         btrfs_release_path(path);
8644         if (err)
8645                 goto out_end_trans;
8646
8647         ret = btrfs_del_root(trans, tree_root, &root->root_key);
8648         if (ret) {
8649                 btrfs_abort_transaction(trans, tree_root, ret);
8650                 goto out_end_trans;
8651         }
8652
8653         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
8654                 ret = btrfs_find_root(tree_root, &root->root_key, path,
8655                                       NULL, NULL);
8656                 if (ret < 0) {
8657                         btrfs_abort_transaction(trans, tree_root, ret);
8658                         err = ret;
8659                         goto out_end_trans;
8660                 } else if (ret > 0) {
8661                         /* if we fail to delete the orphan item this time
8662                          * around, it'll get picked up the next time.
8663                          *
8664                          * The most common failure here is just -ENOENT.
8665                          */
8666                         btrfs_del_orphan_item(trans, tree_root,
8667                                               root->root_key.objectid);
8668                 }
8669         }
8670
8671         if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
8672                 btrfs_add_dropped_root(trans, root);
8673         } else {
8674                 free_extent_buffer(root->node);
8675                 free_extent_buffer(root->commit_root);
8676                 btrfs_put_fs_root(root);
8677         }
8678         root_dropped = true;
8679 out_end_trans:
8680         btrfs_end_transaction_throttle(trans, tree_root);
8681 out_free:
8682         kfree(wc);
8683         btrfs_free_path(path);
8684 out:
8685         /*
8686          * So if we need to stop dropping the snapshot for whatever reason we
8687          * need to make sure to add it back to the dead root list so that we
8688          * keep trying to do the work later.  This also cleans up roots that
8689          * are not in the radix (like when we recover after a power failure
8690          * or unmount) so we don't leak memory.
8691          */
8692         if (!for_reloc && root_dropped == false)
8693                 btrfs_add_dead_root(root);
8694         if (err && err != -EAGAIN)
8695                 btrfs_std_error(root->fs_info, err);
8696         return err;
8697 }
8698
8699 /*
8700  * drop subtree rooted at tree block 'node'.
8701  *
8702  * NOTE: this function will unlock and release tree block 'node'.
8703  * only used by the relocation code.
8704  */
8705 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
8706                         struct btrfs_root *root,
8707                         struct extent_buffer *node,
8708                         struct extent_buffer *parent)
8709 {
8710         struct btrfs_path *path;
8711         struct walk_control *wc;
8712         int level;
8713         int parent_level;
8714         int ret = 0;
8715         int wret;
8716
8717         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
8718
8719         path = btrfs_alloc_path();
8720         if (!path)
8721                 return -ENOMEM;
8722
8723         wc = kzalloc(sizeof(*wc), GFP_NOFS);
8724         if (!wc) {
8725                 btrfs_free_path(path);
8726                 return -ENOMEM;
8727         }
8728
8729         btrfs_assert_tree_locked(parent);
8730         parent_level = btrfs_header_level(parent);
8731         extent_buffer_get(parent);
8732         path->nodes[parent_level] = parent;
8733         path->slots[parent_level] = btrfs_header_nritems(parent);
8734
8735         btrfs_assert_tree_locked(node);
8736         level = btrfs_header_level(node);
8737         path->nodes[level] = node;
8738         path->slots[level] = 0;
8739         path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8740
8741         wc->refs[parent_level] = 1;
8742         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8743         wc->level = level;
8744         wc->shared_level = -1;
8745         wc->stage = DROP_REFERENCE;
8746         wc->update_ref = 0;
8747         wc->keep_locks = 1;
8748         wc->for_reloc = 1;
8749         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8750
8751         while (1) {
8752                 wret = walk_down_tree(trans, root, path, wc);
8753                 if (wret < 0) {
8754                         ret = wret;
8755                         break;
8756                 }
8757
8758                 wret = walk_up_tree(trans, root, path, wc, parent_level);
8759                 if (wret < 0)
8760                         ret = wret;
8761                 if (wret != 0)
8762                         break;
8763         }
8764
8765         kfree(wc);
8766         btrfs_free_path(path);
8767         return ret;
8768 }
8769
8770 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
8771 {
8772         u64 num_devices;
8773         u64 stripped;
8774
8775         /*
8776          * if restripe for this chunk_type is on, pick the target profile
8777          * and return; otherwise do the usual balance
8778          */
8779         stripped = get_restripe_target(root->fs_info, flags);
8780         if (stripped)
8781                 return extended_to_chunk(stripped);
8782
8783         num_devices = root->fs_info->fs_devices->rw_devices;
8784
8785         stripped = BTRFS_BLOCK_GROUP_RAID0 |
8786                 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
8787                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
8788
8789         if (num_devices == 1) {
8790                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8791                 stripped = flags & ~stripped;
8792
8793                 /* turn raid0 into single device chunks */
8794                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
8795                         return stripped;
8796
8797                 /* turn mirroring into duplication */
8798                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
8799                              BTRFS_BLOCK_GROUP_RAID10))
8800                         return stripped | BTRFS_BLOCK_GROUP_DUP;
8801         } else {
8802                 /* they already had raid on here, just return */
8803                 if (flags & stripped)
8804                         return flags;
8805
8806                 stripped |= BTRFS_BLOCK_GROUP_DUP;
8807                 stripped = flags & ~stripped;
8808
8809                 /* switch duplicated blocks with raid1 */
8810                 if (flags & BTRFS_BLOCK_GROUP_DUP)
8811                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
8812
8813                 /* this is drive concat, leave it alone */
8814         }
8815
8816         return flags;
8817 }
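/*
 * Examples (descriptive note, not part of the original file): with a single
 * rw device, RAID1/RAID10 degrade to DUP and RAID0 to single-device chunks;
 * with several devices, DUP is promoted to RAID1 and existing RAID profiles
 * are returned unchanged.
 */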
8818
8819 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8820 {
8821         struct btrfs_space_info *sinfo = cache->space_info;
8822         u64 num_bytes;
8823         u64 min_allocable_bytes;
8824         int ret = -ENOSPC;
8825
8826         /*
8827          * We need some metadata space and system metadata space for
8828          * allocating chunks in some corner cases, so unless we are
8829          * forced, keep a minimum allocable before going read-only.
8830          */
8831         if ((sinfo->flags &
8832              (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
8833             !force)
8834                 min_allocable_bytes = 1 * 1024 * 1024;
8835         else
8836                 min_allocable_bytes = 0;
8837
8838         spin_lock(&sinfo->lock);
8839         spin_lock(&cache->lock);
8840
8841         if (cache->ro) {
8842                 cache->ro++;
8843                 ret = 0;
8844                 goto out;
8845         }
8846
8847         num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8848                     cache->bytes_super - btrfs_block_group_used(&cache->item);
8849
8850         if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8851             sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
8852             min_allocable_bytes <= sinfo->total_bytes) {
8853                 sinfo->bytes_readonly += num_bytes;
8854                 cache->ro++;
8855                 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
8856                 ret = 0;
8857         }
8858 out:
8859         spin_unlock(&cache->lock);
8860         spin_unlock(&sinfo->lock);
8861         return ret;
8862 }
8863
8864 int btrfs_inc_block_group_ro(struct btrfs_root *root,
8865                              struct btrfs_block_group_cache *cache)
8866
8867 {
8868         struct btrfs_trans_handle *trans;
8869         u64 alloc_flags;
8870         int ret;
8871
8872 again:
8873         trans = btrfs_join_transaction(root);
8874         if (IS_ERR(trans))
8875                 return PTR_ERR(trans);
8876
8877         /*
8878          * we're not allowed to set block groups readonly after the dirty
8879          * block groups cache has started writing.  If it already started,
8880          * back off and let this transaction commit
8881          */
8882         mutex_lock(&root->fs_info->ro_block_group_mutex);
8883         if (trans->transaction->dirty_bg_run) {
8884                 u64 transid = trans->transid;
8885
8886                 mutex_unlock(&root->fs_info->ro_block_group_mutex);
8887                 btrfs_end_transaction(trans, root);
8888
8889                 ret = btrfs_wait_for_commit(root, transid);
8890                 if (ret)
8891                         return ret;
8892                 goto again;
8893         }
8894
8895         /*
8896          * if we are changing raid levels, try to allocate a corresponding
8897          * block group with the new raid level.
8898          */
8899         alloc_flags = update_block_group_flags(root, cache->flags);
8900         if (alloc_flags != cache->flags) {
8901                 ret = do_chunk_alloc(trans, root, alloc_flags,
8902                                      CHUNK_ALLOC_FORCE);
8903                 /*
8904                  * ENOSPC is allowed here, we may have enough space
8905                  * already allocated at the new raid level to
8906                  * carry on
8907                  */
8908                 if (ret == -ENOSPC)
8909                         ret = 0;
8910                 if (ret < 0)
8911                         goto out;
8912         }
8913
8914         ret = inc_block_group_ro(cache, 0);
8915         if (!ret)
8916                 goto out;
8917         alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8918         ret = do_chunk_alloc(trans, root, alloc_flags,
8919                              CHUNK_ALLOC_FORCE);
8920         if (ret < 0)
8921                 goto out;
8922         ret = inc_block_group_ro(cache, 0);
8923 out:
8924         if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
8925                 alloc_flags = update_block_group_flags(root, cache->flags);
8926                 lock_chunks(root->fs_info->chunk_root);
8927                 check_system_chunk(trans, root, alloc_flags);
8928                 unlock_chunks(root->fs_info->chunk_root);
8929         }
8930         mutex_unlock(&root->fs_info->ro_block_group_mutex);
8931
8932         btrfs_end_transaction(trans, root);
8933         return ret;
8934 }
8935
8936 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8937                             struct btrfs_root *root, u64 type)
8938 {
8939         u64 alloc_flags = get_alloc_profile(root, type);
8940         return do_chunk_alloc(trans, root, alloc_flags,
8941                               CHUNK_ALLOC_FORCE);
8942 }
8943
8944 /*
8945  * helper to account the unused space of all the read-only block groups in
8946  * the space_info. takes mirrors into account.
8947  */
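/*
 * Example (descriptive note, not part of the original file): a read-only
 * RAID1 block group spanning 1GiB with 512MiB used contributes
 * (1GiB - 512MiB) * 2 = 1GiB of raw free space, since both mirrors count.
 */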
8948 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8949 {
8950         struct btrfs_block_group_cache *block_group;
8951         u64 free_bytes = 0;
8952         int factor;
8953
8954         /* It's df, we don't care if it's racy */
8955         if (list_empty(&sinfo->ro_bgs))
8956                 return 0;
8957
8958         spin_lock(&sinfo->lock);
8959         list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
8960                 spin_lock(&block_group->lock);
8961
8962                 if (!block_group->ro) {
8963                         spin_unlock(&block_group->lock);
8964                         continue;
8965                 }
8966
8967                 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8968                                           BTRFS_BLOCK_GROUP_RAID10 |
8969                                           BTRFS_BLOCK_GROUP_DUP))
8970                         factor = 2;
8971                 else
8972                         factor = 1;
8973
8974                 free_bytes += (block_group->key.offset -
8975                                btrfs_block_group_used(&block_group->item)) *
8976                                factor;
8977
8978                 spin_unlock(&block_group->lock);
8979         }
8980         spin_unlock(&sinfo->lock);
8981
8982         return free_bytes;
8983 }
8984
8985 void btrfs_dec_block_group_ro(struct btrfs_root *root,
8986                               struct btrfs_block_group_cache *cache)
8987 {
8988         struct btrfs_space_info *sinfo = cache->space_info;
8989         u64 num_bytes;
8990
8991         BUG_ON(!cache->ro);
8992
8993         spin_lock(&sinfo->lock);
8994         spin_lock(&cache->lock);
8995         if (!--cache->ro) {
8996                 num_bytes = cache->key.offset - cache->reserved -
8997                             cache->pinned - cache->bytes_super -
8998                             btrfs_block_group_used(&cache->item);
8999                 sinfo->bytes_readonly -= num_bytes;
9000                 list_del_init(&cache->ro_list);
9001         }
9002         spin_unlock(&cache->lock);
9003         spin_unlock(&sinfo->lock);
9004 }
9005
9006 /*
9007  * checks to see if it's even possible to relocate this block group.
9008  *
9009  * @return - -1 if it's not a good idea to relocate this block group, 0 if
9010  * it's ok to go ahead and try.
9011  */
9012 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9013 {
9014         struct btrfs_block_group_cache *block_group;
9015         struct btrfs_space_info *space_info;
9016         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9017         struct btrfs_device *device;
9018         struct btrfs_trans_handle *trans;
9019         u64 min_free;
9020         u64 dev_min = 1;
9021         u64 dev_nr = 0;
9022         u64 target;
9023         int index;
9024         int full = 0;
9025         int ret = 0;
9026
9027         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9028
9029         /* odd, couldn't find the block group, leave it alone */
9030         if (!block_group)
9031                 return -1;
9032
9033         min_free = btrfs_block_group_used(&block_group->item);
9034
9035         /* no bytes used, we're good */
9036         if (!min_free)
9037                 goto out;
9038
9039         space_info = block_group->space_info;
9040         spin_lock(&space_info->lock);
9041
9042         full = space_info->full;
9043
9044         /*
9045          * if this is the last block group we have in this space, we can't
9046          * relocate it unless we're able to allocate a new chunk below.
9047          *
9048          * Otherwise, we need to make sure we have room in the space to handle
9049          * all of the extents from this block group.  If we can, we're good
9050          */
9051         if ((space_info->total_bytes != block_group->key.offset) &&
9052             (space_info->bytes_used + space_info->bytes_reserved +
9053              space_info->bytes_pinned + space_info->bytes_readonly +
9054              min_free < space_info->total_bytes)) {
9055                 spin_unlock(&space_info->lock);
9056                 goto out;
9057         }
9058         spin_unlock(&space_info->lock);
9059
9060         /*
9061          * ok we don't have enough space, but maybe we have free space on our
9062          * devices to allocate new chunks for relocation, so loop through our
9063          * alloc devices and guess if we have enough space.  if this block
9064          * group is going to be restriped, run checks against the target
9065          * profile instead of the current one.
9066          */
9067         ret = -1;
9068
9069         /*
9070          * index:
9071          *      0: raid10
9072          *      1: raid1
9073          *      2: dup
9074          *      3: raid0
9075          *      4: single
9076          */
9077         target = get_restripe_target(root->fs_info, block_group->flags);
9078         if (target) {
9079                 index = __get_raid_index(extended_to_chunk(target));
9080         } else {
9081                 /*
9082                  * this is just a balance, so if we were marked as full
9083                  * we know there is no space for a new chunk
9084                  */
9085                 if (full)
9086                         goto out;
9087
9088                 index = get_block_group_index(block_group);
9089         }
9090
9091         if (index == BTRFS_RAID_RAID10) {
9092                 dev_min = 4;
9093                 /* Divide by 2 */
9094                 min_free >>= 1;
9095         } else if (index == BTRFS_RAID_RAID1) {
9096                 dev_min = 2;
9097         } else if (index == BTRFS_RAID_DUP) {
9098                 /* Multiply by 2 */
9099                 min_free <<= 1;
9100         } else if (index == BTRFS_RAID_RAID0) {
9101                 dev_min = fs_devices->rw_devices;
9102                 min_free = div64_u64(min_free, dev_min);
9103         }
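         /*
          * e.g. (descriptive note, not part of the original file): RAID10
          * stripes across mirrored pairs, so at least 4 devices are needed
          * and each holds half of min_free; DUP keeps both copies on one
          * device, so min_free doubles.
          */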
9104
9105         /* We need to do this so that we can look at pending chunks */
9106         trans = btrfs_join_transaction(root);
9107         if (IS_ERR(trans)) {
9108                 ret = PTR_ERR(trans);
9109                 goto out;
9110         }
9111
9112         mutex_lock(&root->fs_info->chunk_mutex);
9113         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9114                 u64 dev_offset;
9115
9116                 /*
9117                  * check to make sure we can actually find a chunk with enough
9118                  * space to fit our block group in.
9119                  */
9120                 if (device->total_bytes > device->bytes_used + min_free &&
9121                     !device->is_tgtdev_for_dev_replace) {
9122                         ret = find_free_dev_extent(trans, device, min_free,
9123                                                    &dev_offset, NULL);
9124                         if (!ret)
9125                                 dev_nr++;
9126
9127                         if (dev_nr >= dev_min)
9128                                 break;
9129
9130                         ret = -1;
9131                 }
9132         }
9133         mutex_unlock(&root->fs_info->chunk_mutex);
9134         btrfs_end_transaction(trans, root);
9135 out:
9136         btrfs_put_block_group(block_group);
9137         return ret;
9138 }
9139
9140 static int find_first_block_group(struct btrfs_root *root,
9141                 struct btrfs_path *path, struct btrfs_key *key)
9142 {
9143         int ret = 0;
9144         struct btrfs_key found_key;
9145         struct extent_buffer *leaf;
9146         int slot;
9147
9148         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9149         if (ret < 0)
9150                 goto out;
9151
9152         while (1) {
9153                 slot = path->slots[0];
9154                 leaf = path->nodes[0];
9155                 if (slot >= btrfs_header_nritems(leaf)) {
9156                         ret = btrfs_next_leaf(root, path);
9157                         if (ret == 0)
9158                                 continue;
9159                         if (ret < 0)
9160                                 goto out;
9161                         break;
9162                 }
9163                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9164
9165                 if (found_key.objectid >= key->objectid &&
9166                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9167                         ret = 0;
9168                         goto out;
9169                 }
9170                 path->slots[0]++;
9171         }
9172 out:
9173         return ret;
9174 }
9175
9176 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9177 {
9178         struct btrfs_block_group_cache *block_group;
9179         u64 last = 0;
9180
9181         while (1) {
9182                 struct inode *inode;
9183
9184                 block_group = btrfs_lookup_first_block_group(info, last);
9185                 while (block_group) {
9186                         spin_lock(&block_group->lock);
9187                         if (block_group->iref)
9188                                 break;
9189                         spin_unlock(&block_group->lock);
9190                         block_group = next_block_group(info->tree_root,
9191                                                        block_group);
9192                 }
9193                 if (!block_group) {
9194                         if (last == 0)
9195                                 break;
9196                         last = 0;
9197                         continue;
9198                 }
9199
9200                 inode = block_group->inode;
9201                 block_group->iref = 0;
9202                 block_group->inode = NULL;
9203                 spin_unlock(&block_group->lock);
9204                 iput(inode);
9205                 last = block_group->key.objectid + block_group->key.offset;
9206                 btrfs_put_block_group(block_group);
9207         }
9208 }
9209
9210 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9211 {
9212         struct btrfs_block_group_cache *block_group;
9213         struct btrfs_space_info *space_info;
9214         struct btrfs_caching_control *caching_ctl;
9215         struct rb_node *n;
9216
9217         down_write(&info->commit_root_sem);
9218         while (!list_empty(&info->caching_block_groups)) {
9219                 caching_ctl = list_entry(info->caching_block_groups.next,
9220                                          struct btrfs_caching_control, list);
9221                 list_del(&caching_ctl->list);
9222                 put_caching_control(caching_ctl);
9223         }
9224         up_write(&info->commit_root_sem);
9225
9226         spin_lock(&info->unused_bgs_lock);
9227         while (!list_empty(&info->unused_bgs)) {
9228                 block_group = list_first_entry(&info->unused_bgs,
9229                                                struct btrfs_block_group_cache,
9230                                                bg_list);
9231                 list_del_init(&block_group->bg_list);
9232                 btrfs_put_block_group(block_group);
9233         }
9234         spin_unlock(&info->unused_bgs_lock);
9235
9236         spin_lock(&info->block_group_cache_lock);
9237         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9238                 block_group = rb_entry(n, struct btrfs_block_group_cache,
9239                                        cache_node);
9240                 rb_erase(&block_group->cache_node,
9241                          &info->block_group_cache_tree);
9242                 RB_CLEAR_NODE(&block_group->cache_node);
9243                 spin_unlock(&info->block_group_cache_lock);
9244
9245                 down_write(&block_group->space_info->groups_sem);
9246                 list_del(&block_group->list);
9247                 up_write(&block_group->space_info->groups_sem);
9248
9249                 if (block_group->cached == BTRFS_CACHE_STARTED)
9250                         wait_block_group_cache_done(block_group);
9251
9252                 /*
9253                  * We haven't cached this block group, which means we could
9254                  * possibly have excluded extents on this block group.
9255                  */
9256                 if (block_group->cached == BTRFS_CACHE_NO ||
9257                     block_group->cached == BTRFS_CACHE_ERROR)
9258                         free_excluded_extents(info->extent_root, block_group);
9259
9260                 btrfs_remove_free_space_cache(block_group);
9261                 btrfs_put_block_group(block_group);
9262
9263                 spin_lock(&info->block_group_cache_lock);
9264         }
9265         spin_unlock(&info->block_group_cache_lock);
9266
9267         /* now that all the block groups are freed, go through and
9268          * free all the space_info structs.  This is only called during
9269          * the final stages of unmount, and so we know nobody is
9270          * using them.  We call synchronize_rcu() once before we start,
9271          * just to be on the safe side.
9272          */
9273         synchronize_rcu();
9274
9275         release_global_block_rsv(info);
9276
9277         while (!list_empty(&info->space_info)) {
9278                 int i;
9279
9280                 space_info = list_entry(info->space_info.next,
9281                                         struct btrfs_space_info,
9282                                         list);
9283                 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9284                         if (WARN_ON(space_info->bytes_pinned > 0 ||
9285                             space_info->bytes_reserved > 0 ||
9286                             space_info->bytes_may_use > 0)) {
9287                                 dump_space_info(space_info, 0, 0);
9288                         }
9289                 }
9290                 list_del(&space_info->list);
9291                 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9292                         struct kobject *kobj;
9293                         kobj = space_info->block_group_kobjs[i];
9294                         space_info->block_group_kobjs[i] = NULL;
9295                         if (kobj) {
9296                                 kobject_del(kobj);
9297                                 kobject_put(kobj);
9298                         }
9299                 }
9300                 kobject_del(&space_info->kobj);
9301                 kobject_put(&space_info->kobj);
9302         }
9303         return 0;
9304 }
9305
9306 static void __link_block_group(struct btrfs_space_info *space_info,
9307                                struct btrfs_block_group_cache *cache)
9308 {
9309         int index = get_block_group_index(cache);
9310         bool first = false;
9311
9312         down_write(&space_info->groups_sem);
9313         if (list_empty(&space_info->block_groups[index]))
9314                 first = true;
9315         list_add_tail(&cache->list, &space_info->block_groups[index]);
9316         up_write(&space_info->groups_sem);
9317
9318         if (first) {
9319                 struct raid_kobject *rkobj;
9320                 int ret;
9321
9322                 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9323                 if (!rkobj)
9324                         goto out_err;
9325                 rkobj->raid_type = index;
9326                 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9327                 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9328                                   "%s", get_raid_name(index));
9329                 if (ret) {
9330                         kobject_put(&rkobj->kobj);
9331                         goto out_err;
9332                 }
9333                 space_info->block_group_kobjs[index] = &rkobj->kobj;
9334         }
9335
9336         return;
9337 out_err:
9338         pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9339 }
9340
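/*
 * Allocate and initialize an in-memory block group descriptor for the
 * range [start, start + size).  Returns NULL on allocation failure; the
 * caller owns the initial reference and drops it with
 * btrfs_put_block_group().
 */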
9341 static struct btrfs_block_group_cache *
9342 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9343 {
9344         struct btrfs_block_group_cache *cache;
9345
9346         cache = kzalloc(sizeof(*cache), GFP_NOFS);
9347         if (!cache)
9348                 return NULL;
9349
9350         cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9351                                         GFP_NOFS);
9352         if (!cache->free_space_ctl) {
9353                 kfree(cache);
9354                 return NULL;
9355         }
9356
9357         cache->key.objectid = start;
9358         cache->key.offset = size;
9359         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9360
9361         cache->sectorsize = root->sectorsize;
9362         cache->fs_info = root->fs_info;
9363         cache->full_stripe_len = btrfs_full_stripe_len(root,
9364                                                &root->fs_info->mapping_tree,
9365                                                start);
9366         atomic_set(&cache->count, 1);
9367         spin_lock_init(&cache->lock);
9368         init_rwsem(&cache->data_rwsem);
9369         INIT_LIST_HEAD(&cache->list);
9370         INIT_LIST_HEAD(&cache->cluster_list);
9371         INIT_LIST_HEAD(&cache->bg_list);
9372         INIT_LIST_HEAD(&cache->ro_list);
9373         INIT_LIST_HEAD(&cache->dirty_list);
9374         INIT_LIST_HEAD(&cache->io_list);
9375         btrfs_init_free_space_ctl(cache);
9376         atomic_set(&cache->trimming, 0);
9377
9378         return cache;
9379 }
9380
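/*
 * Called at mount time: walk all BTRFS_BLOCK_GROUP_ITEM_KEY items in the
 * extent tree and build the in-memory block group cache, the space_info
 * structures and the per-raid-level sysfs entries.  Fully used and
 * completely empty block groups are marked cached up front so the
 * caching workers can skip them.
 */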
9381 int btrfs_read_block_groups(struct btrfs_root *root)
9382 {
9383         struct btrfs_path *path;
9384         int ret;
9385         struct btrfs_block_group_cache *cache;
9386         struct btrfs_fs_info *info = root->fs_info;
9387         struct btrfs_space_info *space_info;
9388         struct btrfs_key key;
9389         struct btrfs_key found_key;
9390         struct extent_buffer *leaf;
9391         int need_clear = 0;
9392         u64 cache_gen;
9393
9394         root = info->extent_root;
9395         key.objectid = 0;
9396         key.offset = 0;
9397         key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9398         path = btrfs_alloc_path();
9399         if (!path)
9400                 return -ENOMEM;
9401         path->reada = 1;
9402
9403         cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9404         if (btrfs_test_opt(root, SPACE_CACHE) &&
9405             btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9406                 need_clear = 1;
9407         if (btrfs_test_opt(root, CLEAR_CACHE))
9408                 need_clear = 1;
9409
9410         while (1) {
9411                 ret = find_first_block_group(root, path, &key);
9412                 if (ret > 0)
9413                         break;
9414                 if (ret != 0)
9415                         goto error;
9416
9417                 leaf = path->nodes[0];
9418                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9419
9420                 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9421                                                        found_key.offset);
9422                 if (!cache) {
9423                         ret = -ENOMEM;
9424                         goto error;
9425                 }
9426
9427                 if (need_clear) {
9428                         /*
9429                          * When we mount with an old space cache, we need to
9430                          * set BTRFS_DC_CLEAR and set the dirty flag.
9431                          *
9432                          * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9433                          *    truncate the old free space cache inode and
9434                          *    set up a new one.
9435                          * b) Setting the 'dirty flag' makes sure that we
9436                          *    flush the new space cache info onto disk.
9437                          */
9438                         if (btrfs_test_opt(root, SPACE_CACHE))
9439                                 cache->disk_cache_state = BTRFS_DC_CLEAR;
9440                 }
9441
9442                 read_extent_buffer(leaf, &cache->item,
9443                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
9444                                    sizeof(cache->item));
9445                 cache->flags = btrfs_block_group_flags(&cache->item);
9446
9447                 key.objectid = found_key.objectid + found_key.offset;
9448                 btrfs_release_path(path);
9449
9450                 /*
9451                  * We need to exclude the super stripes now so that the space
9452                  * info has super bytes accounted for, otherwise we'll think
9453                  * we have more space than we actually do.
9454                  */
9455                 ret = exclude_super_stripes(root, cache);
9456                 if (ret) {
9457                         /*
9458                          * We may have excluded something, so call this just in
9459                          * case.
9460                          */
9461                         free_excluded_extents(root, cache);
9462                         btrfs_put_block_group(cache);
9463                         goto error;
9464                 }
9465
9466                 /*
9467                  * Check for two cases: either we are full, and therefore
9468                  * don't need to bother with the caching work since we won't
9469                  * find any space; or we are empty, and we can just add all
9470                  * the space in and be done with it.  This saves us a lot of
9471                  * time, particularly in the full case.
9472                  */
9473                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9474                         cache->last_byte_to_unpin = (u64)-1;
9475                         cache->cached = BTRFS_CACHE_FINISHED;
9476                         free_excluded_extents(root, cache);
9477                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9478                         cache->last_byte_to_unpin = (u64)-1;
9479                         cache->cached = BTRFS_CACHE_FINISHED;
9480                         add_new_free_space(cache, root->fs_info,
9481                                            found_key.objectid,
9482                                            found_key.objectid +
9483                                            found_key.offset);
9484                         free_excluded_extents(root, cache);
9485                 }
9486
9487                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9488                 if (ret) {
9489                         btrfs_remove_free_space_cache(cache);
9490                         btrfs_put_block_group(cache);
9491                         goto error;
9492                 }
9493
9494                 ret = update_space_info(info, cache->flags, found_key.offset,
9495                                         btrfs_block_group_used(&cache->item),
9496                                         &space_info);
9497                 if (ret) {
9498                         btrfs_remove_free_space_cache(cache);
9499                         spin_lock(&info->block_group_cache_lock);
9500                         rb_erase(&cache->cache_node,
9501                                  &info->block_group_cache_tree);
9502                         RB_CLEAR_NODE(&cache->cache_node);
9503                         spin_unlock(&info->block_group_cache_lock);
9504                         btrfs_put_block_group(cache);
9505                         goto error;
9506                 }
9507
9508                 cache->space_info = space_info;
9509                 spin_lock(&cache->space_info->lock);
9510                 cache->space_info->bytes_readonly += cache->bytes_super;
9511                 spin_unlock(&cache->space_info->lock);
9512
9513                 __link_block_group(space_info, cache);
9514
9515                 set_avail_alloc_bits(root->fs_info, cache->flags);
9516                 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9517                         inc_block_group_ro(cache, 1);
9518                 } else if (btrfs_block_group_used(&cache->item) == 0) {
9519                         spin_lock(&info->unused_bgs_lock);
9520                         /* Should always be true but just in case. */
9521                         if (list_empty(&cache->bg_list)) {
9522                                 btrfs_get_block_group(cache);
9523                                 list_add_tail(&cache->bg_list,
9524                                               &info->unused_bgs);
9525                         }
9526                         spin_unlock(&info->unused_bgs_lock);
9527                 }
9528         }
9529
9530         list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9531                 if (!(get_alloc_profile(root, space_info->flags) &
9532                       (BTRFS_BLOCK_GROUP_RAID10 |
9533                        BTRFS_BLOCK_GROUP_RAID1 |
9534                        BTRFS_BLOCK_GROUP_RAID5 |
9535                        BTRFS_BLOCK_GROUP_RAID6 |
9536                        BTRFS_BLOCK_GROUP_DUP)))
9537                         continue;
9538                 /*
9539                  * Avoid allocating from un-mirrored block groups if there
9540                  * are mirrored block groups.
9541                  */
9542                 list_for_each_entry(cache,
9543                                 &space_info->block_groups[BTRFS_RAID_RAID0],
9544                                 list)
9545                         inc_block_group_ro(cache, 1);
9546                 list_for_each_entry(cache,
9547                                 &space_info->block_groups[BTRFS_RAID_SINGLE],
9548                                 list)
9549                         inc_block_group_ro(cache, 1);
9550         }
9551
9552         init_global_block_rsv(info);
9553         ret = 0;
9554 error:
9555         btrfs_free_path(path);
9556         return ret;
9557 }
9558
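/*
 * Insert the block group items for all block groups created in the
 * current transaction (trans->new_bgs) into the extent tree and finish
 * their chunk allocation.  Errors abort the transaction.
 */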
9559 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9560                                        struct btrfs_root *root)
9561 {
9562         struct btrfs_block_group_cache *block_group, *tmp;
9563         struct btrfs_root *extent_root = root->fs_info->extent_root;
9564         struct btrfs_block_group_item item;
9565         struct btrfs_key key;
9566         int ret = 0;
9567         bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
9568
9569         trans->can_flush_pending_bgs = false;
9570         list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9571                 if (ret)
9572                         goto next;
9573
9574                 spin_lock(&block_group->lock);
9575                 memcpy(&item, &block_group->item, sizeof(item));
9576                 memcpy(&key, &block_group->key, sizeof(key));
9577                 spin_unlock(&block_group->lock);
9578
9579                 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9580                                         sizeof(item));
9581                 if (ret)
9582                         btrfs_abort_transaction(trans, extent_root, ret);
9583                 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9584                                                key.objectid, key.offset);
9585                 if (ret)
9586                         btrfs_abort_transaction(trans, extent_root, ret);
9587 next:
9588                 list_del_init(&block_group->bg_list);
9589         }
9590         trans->can_flush_pending_bgs = can_flush_pending_bgs;
9591 }
9592
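/*
 * Create a new block group for the chunk that was just allocated at
 * [chunk_offset, chunk_offset + size) and queue it on trans->new_bgs;
 * the corresponding block group item is inserted into the extent tree
 * later, by btrfs_create_pending_block_groups().
 */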
9593 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9594                            struct btrfs_root *root, u64 bytes_used,
9595                            u64 type, u64 chunk_objectid, u64 chunk_offset,
9596                            u64 size)
9597 {
9598         int ret;
9599         struct btrfs_root *extent_root;
9600         struct btrfs_block_group_cache *cache;
9601
9602         extent_root = root->fs_info->extent_root;
9603
9604         btrfs_set_log_full_commit(root->fs_info, trans);
9605
9606         cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9607         if (!cache)
9608                 return -ENOMEM;
9609
9610         btrfs_set_block_group_used(&cache->item, bytes_used);
9611         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9612         btrfs_set_block_group_flags(&cache->item, type);
9613
9614         cache->flags = type;
9615         cache->last_byte_to_unpin = (u64)-1;
9616         cache->cached = BTRFS_CACHE_FINISHED;
9617         ret = exclude_super_stripes(root, cache);
9618         if (ret) {
9619                 /*
9620                  * We may have excluded something, so call this just in
9621                  * case.
9622                  */
9623                 free_excluded_extents(root, cache);
9624                 btrfs_put_block_group(cache);
9625                 return ret;
9626         }
9627
9628         add_new_free_space(cache, root->fs_info, chunk_offset,
9629                            chunk_offset + size);
9630
9631         free_excluded_extents(root, cache);
9632
9633         /*
9634          * Call to ensure the corresponding space_info object is created and
9635          * assigned to our block group, but don't update its counters just yet.
9636          * We want our bg to be added to the rbtree with its ->space_info set.
9637          */
9638         ret = update_space_info(root->fs_info, cache->flags, 0, 0,
9639                                 &cache->space_info);
9640         if (ret) {
9641                 btrfs_remove_free_space_cache(cache);
9642                 btrfs_put_block_group(cache);
9643                 return ret;
9644         }
9645
9646         ret = btrfs_add_block_group_cache(root->fs_info, cache);
9647         if (ret) {
9648                 btrfs_remove_free_space_cache(cache);
9649                 btrfs_put_block_group(cache);
9650                 return ret;
9651         }
9652
9653         /*
9654          * Now that our block group has its ->space_info set and is inserted in
9655          * the rbtree, update the space info's counters.
9656          */
9657         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
9658                                 &cache->space_info);
9659         if (ret) {
9660                 btrfs_remove_free_space_cache(cache);
9661                 spin_lock(&root->fs_info->block_group_cache_lock);
9662                 rb_erase(&cache->cache_node,
9663                          &root->fs_info->block_group_cache_tree);
9664                 RB_CLEAR_NODE(&cache->cache_node);
9665                 spin_unlock(&root->fs_info->block_group_cache_lock);
9666                 btrfs_put_block_group(cache);
9667                 return ret;
9668         }
9669         update_global_block_rsv(root->fs_info);
9670
9671         spin_lock(&cache->space_info->lock);
9672         cache->space_info->bytes_readonly += cache->bytes_super;
9673         spin_unlock(&cache->space_info->lock);
9674
9675         __link_block_group(cache->space_info, cache);
9676
9677         list_add_tail(&cache->bg_list, &trans->new_bgs);
9678
9679         set_avail_alloc_bits(extent_root->fs_info, type);
9680
9681         return 0;
9682 }
9683
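/*
 * Clear the extended profile bits of @flags from the relevant
 * avail_*_alloc_bits mask; called when the last block group of a raid
 * level is removed.
 */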
9684 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9685 {
9686         u64 extra_flags = chunk_to_extended(flags) &
9687                                 BTRFS_EXTENDED_PROFILE_MASK;
9688
9689         write_seqlock(&fs_info->profiles_lock);
9690         if (flags & BTRFS_BLOCK_GROUP_DATA)
9691                 fs_info->avail_data_alloc_bits &= ~extra_flags;
9692         if (flags & BTRFS_BLOCK_GROUP_METADATA)
9693                 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
9694         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
9695                 fs_info->avail_system_alloc_bits &= ~extra_flags;
9696         write_sequnlock(&fs_info->profiles_lock);
9697 }
9698
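/*
 * Remove an empty, read-only block group: drop its free space cache
 * inode, unlink it from all in-memory structures, delete its item from
 * the extent tree and, unless a trim is still running against it, remove
 * the chunk mapping as well.
 */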
9699 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9700                              struct btrfs_root *root, u64 group_start,
9701                              struct extent_map *em)
9702 {
9703         struct btrfs_path *path;
9704         struct btrfs_block_group_cache *block_group;
9705         struct btrfs_free_cluster *cluster;
9706         struct btrfs_root *tree_root = root->fs_info->tree_root;
9707         struct btrfs_key key;
9708         struct inode *inode;
9709         struct kobject *kobj = NULL;
9710         int ret;
9711         int index;
9712         int factor;
9713         struct btrfs_caching_control *caching_ctl = NULL;
9714         bool remove_em;
9715
9716         root = root->fs_info->extent_root;
9717
9718         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
9719         BUG_ON(!block_group);
9720         BUG_ON(!block_group->ro);
9721
9722         /*
9723          * Free the reserved super bytes from this block group before
9724          * removing it.
9725          */
9726         free_excluded_extents(root, block_group);
9727
9728         memcpy(&key, &block_group->key, sizeof(key));
9729         index = get_block_group_index(block_group);
9730         if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
9731                                   BTRFS_BLOCK_GROUP_RAID1 |
9732                                   BTRFS_BLOCK_GROUP_RAID10))
9733                 factor = 2;
9734         else
9735                 factor = 1;
9736
9737         /* make sure this block group isn't part of an allocation cluster */
9738         cluster = &root->fs_info->data_alloc_cluster;
9739         spin_lock(&cluster->refill_lock);
9740         btrfs_return_cluster_to_free_space(block_group, cluster);
9741         spin_unlock(&cluster->refill_lock);
9742
9743         /*
9744          * make sure this block group isn't part of a metadata
9745          * allocation cluster
9746          */
9747         cluster = &root->fs_info->meta_alloc_cluster;
9748         spin_lock(&cluster->refill_lock);
9749         btrfs_return_cluster_to_free_space(block_group, cluster);
9750         spin_unlock(&cluster->refill_lock);
9751
9752         path = btrfs_alloc_path();
9753         if (!path) {
9754                 ret = -ENOMEM;
9755                 goto out;
9756         }
9757
9758         /*
9759          * get the inode first so any iput calls done for the io_list
9760          * aren't the final iput (no unlinks allowed now)
9761          */
9762         inode = lookup_free_space_inode(tree_root, block_group, path);
9763
9764         mutex_lock(&trans->transaction->cache_write_mutex);
9765         /*
9766          * Make sure our free space cache IO is done before removing the
9767          * free space inode.
9768          */
9769         spin_lock(&trans->transaction->dirty_bgs_lock);
9770         if (!list_empty(&block_group->io_list)) {
9771                 list_del_init(&block_group->io_list);
9772
9773                 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
9774
9775                 spin_unlock(&trans->transaction->dirty_bgs_lock);
9776                 btrfs_wait_cache_io(root, trans, block_group,
9777                                     &block_group->io_ctl, path,
9778                                     block_group->key.objectid);
9779                 btrfs_put_block_group(block_group);
9780                 spin_lock(&trans->transaction->dirty_bgs_lock);
9781         }
9782
9783         if (!list_empty(&block_group->dirty_list)) {
9784                 list_del_init(&block_group->dirty_list);
9785                 btrfs_put_block_group(block_group);
9786         }
9787         spin_unlock(&trans->transaction->dirty_bgs_lock);
9788         mutex_unlock(&trans->transaction->cache_write_mutex);
9789
9790         if (!IS_ERR(inode)) {
9791                 ret = btrfs_orphan_add(trans, inode);
9792                 if (ret) {
9793                         btrfs_add_delayed_iput(inode);
9794                         goto out;
9795                 }
9796                 clear_nlink(inode);
9797                 /* One for the block group's ref */
9798                 spin_lock(&block_group->lock);
9799                 if (block_group->iref) {
9800                         block_group->iref = 0;
9801                         block_group->inode = NULL;
9802                         spin_unlock(&block_group->lock);
9803                         iput(inode);
9804                 } else {
9805                         spin_unlock(&block_group->lock);
9806                 }
9807                 /* One for our lookup ref */
9808                 btrfs_add_delayed_iput(inode);
9809         }
9810
9811         key.objectid = BTRFS_FREE_SPACE_OBJECTID;
9812         key.offset = block_group->key.objectid;
9813         key.type = 0;
9814
9815         ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
9816         if (ret < 0)
9817                 goto out;
9818         if (ret > 0)
9819                 btrfs_release_path(path);
9820         if (ret == 0) {
9821                 ret = btrfs_del_item(trans, tree_root, path);
9822                 if (ret)
9823                         goto out;
9824                 btrfs_release_path(path);
9825         }
9826
9827         spin_lock(&root->fs_info->block_group_cache_lock);
9828         rb_erase(&block_group->cache_node,
9829                  &root->fs_info->block_group_cache_tree);
9830         RB_CLEAR_NODE(&block_group->cache_node);
9831
9832         if (root->fs_info->first_logical_byte == block_group->key.objectid)
9833                 root->fs_info->first_logical_byte = (u64)-1;
9834         spin_unlock(&root->fs_info->block_group_cache_lock);
9835
9836         down_write(&block_group->space_info->groups_sem);
9837         /*
9838          * we must use list_del_init so people can check to see if they
9839          * are still on the list after taking the semaphore
9840          */
9841         list_del_init(&block_group->list);
9842         if (list_empty(&block_group->space_info->block_groups[index])) {
9843                 kobj = block_group->space_info->block_group_kobjs[index];
9844                 block_group->space_info->block_group_kobjs[index] = NULL;
9845                 clear_avail_alloc_bits(root->fs_info, block_group->flags);
9846         }
9847         up_write(&block_group->space_info->groups_sem);
9848         if (kobj) {
9849                 kobject_del(kobj);
9850                 kobject_put(kobj);
9851         }
9852
9853         if (block_group->has_caching_ctl)
9854                 caching_ctl = get_caching_control(block_group);
9855         if (block_group->cached == BTRFS_CACHE_STARTED)
9856                 wait_block_group_cache_done(block_group);
9857         if (block_group->has_caching_ctl) {
9858                 down_write(&root->fs_info->commit_root_sem);
9859                 if (!caching_ctl) {
9860                         struct btrfs_caching_control *ctl;
9861
9862                         list_for_each_entry(ctl,
9863                                     &root->fs_info->caching_block_groups, list)
9864                                 if (ctl->block_group == block_group) {
9865                                         caching_ctl = ctl;
9866                                         atomic_inc(&caching_ctl->count);
9867                                         break;
9868                                 }
9869                 }
9870                 if (caching_ctl)
9871                         list_del_init(&caching_ctl->list);
9872                 up_write(&root->fs_info->commit_root_sem);
9873                 if (caching_ctl) {
9874                         /* Once for the caching bgs list and once for us. */
9875                         put_caching_control(caching_ctl);
9876                         put_caching_control(caching_ctl);
9877                 }
9878         }
9879
9880         spin_lock(&trans->transaction->dirty_bgs_lock);
9881         WARN_ON(!list_empty(&block_group->dirty_list));
9882         WARN_ON(!list_empty(&block_group->io_list));
9887         spin_unlock(&trans->transaction->dirty_bgs_lock);
9888         btrfs_remove_free_space_cache(block_group);
9889
9890         spin_lock(&block_group->space_info->lock);
9891         list_del_init(&block_group->ro_list);
9892
9893         if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
9894                 WARN_ON(block_group->space_info->total_bytes
9895                         < block_group->key.offset);
9896                 WARN_ON(block_group->space_info->bytes_readonly
9897                         < block_group->key.offset);
9898                 WARN_ON(block_group->space_info->disk_total
9899                         < block_group->key.offset * factor);
9900         }
9901         block_group->space_info->total_bytes -= block_group->key.offset;
9902         block_group->space_info->bytes_readonly -= block_group->key.offset;
9903         block_group->space_info->disk_total -= block_group->key.offset * factor;
9904
9905         spin_unlock(&block_group->space_info->lock);
9906
9907         memcpy(&key, &block_group->key, sizeof(key));
9908
9909         lock_chunks(root);
9910         if (!list_empty(&em->list)) {
9911                 /* We're in the transaction->pending_chunks list. */
9912                 free_extent_map(em);
9913         }
9914         spin_lock(&block_group->lock);
9915         block_group->removed = 1;
9916         /*
9917          * At this point trimming can't start on this block group, because we
9918          * removed the block group from the tree fs_info->block_group_cache_tree
9919          * so no one can find it anymore, and even if someone already got this
9920          * block group before we removed it from the rbtree, they have already
9921          * incremented block_group->trimming - if they didn't, they won't find
9922          * any free space entries because we already removed them all when we
9923          * called btrfs_remove_free_space_cache().
9924          *
9925          * And we must not remove the extent map from the fs_info->mapping_tree
9926          * to prevent the same logical address range and physical device space
9927          * ranges from being reused for a new block group. This is because our
9928          * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
9929          * completely transactionless, so while it is trimming a range the
9930          * currently running transaction might finish and a new one start,
9931          * allowing for new block groups to be created that can reuse the same
9932          * physical device locations unless we take this special care.
9933          *
9934          * There may also be an implicit trim operation if the file system
9935          * is mounted with -odiscard. The same protections must remain
9936          * in place until the extents have been discarded completely when
9937          * the transaction commit has completed.
9938          */
9939         remove_em = (atomic_read(&block_group->trimming) == 0);
9940         /*
9941          * Make sure a trimmer task always sees the em in the pinned_chunks list
9942          * if it sees block_group->removed == 1 (needs to lock block_group->lock
9943          * before checking block_group->removed).
9944          */
9945         if (!remove_em) {
9946                 /*
9947                  * Our em might be in trans->transaction->pending_chunks which
9948                  * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
9949                  * and so is the fs_info->pinned_chunks list.
9950                  *
9951                  * So at this point we must be holding the chunk_mutex to avoid
9952                  * any races with chunk allocation (more specifically at
9953                  * volumes.c:contains_pending_extent()), to ensure it always
9954                  * sees the em, either in the pending_chunks list or in the
9955                  * pinned_chunks list.
9956                  */
9957                 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
9958         }
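        /*
         * Illustrative sketch (hedged; not code from this function): the
         * trimming side is expected to pair with the above roughly as
         *
         *      spin_lock(&block_group->lock);
         *      if (block_group->removed)
         *              em_is_pinned = true;  // em is on fs_info->pinned_chunks
         *      spin_unlock(&block_group->lock);
         *
         * i.e. seeing removed == 1 under block_group->lock guarantees the
         * em was moved to the pinned_chunks list and is still reachable.
         */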
9959         spin_unlock(&block_group->lock);
9960
9961         if (remove_em) {
9962                 struct extent_map_tree *em_tree;
9963
9964                 em_tree = &root->fs_info->mapping_tree.map_tree;
9965                 write_lock(&em_tree->lock);
9966                 /*
9967                  * The em might be in the pending_chunks list, so make sure the
9968                  * chunk mutex is locked, since remove_extent_mapping() will
9969                  * delete us from that list.
9970                  */
9971                 remove_extent_mapping(em_tree, em);
9972                 write_unlock(&em_tree->lock);
9973                 /* once for the tree */
9974                 free_extent_map(em);
9975         }
9976
9977         unlock_chunks(root);
9978
9979         btrfs_put_block_group(block_group);
9980         btrfs_put_block_group(block_group);
9981
9982         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
9983         if (ret > 0)
9984                 ret = -EIO;
9985         if (ret < 0)
9986                 goto out;
9987
9988         ret = btrfs_del_item(trans, root, path);
9989 out:
9990         btrfs_free_path(path);
9991         return ret;
9992 }
9993
9994 /*
9995  * Process the unused_bgs list and remove any block groups that don't have
9996  * any allocated space in them.
9997  */
9998 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9999 {
10000         struct btrfs_block_group_cache *block_group;
10001         struct btrfs_space_info *space_info;
10002         struct btrfs_root *root = fs_info->extent_root;
10003         struct btrfs_trans_handle *trans;
10004         int ret = 0;
10005
10006         if (!fs_info->open)
10007                 return;
10008
10009         spin_lock(&fs_info->unused_bgs_lock);
10010         while (!list_empty(&fs_info->unused_bgs)) {
10011                 u64 start, end;
10012                 int trimming;
10013
10014                 block_group = list_first_entry(&fs_info->unused_bgs,
10015                                                struct btrfs_block_group_cache,
10016                                                bg_list);
10017                 space_info = block_group->space_info;
10018                 list_del_init(&block_group->bg_list);
10019                 if (ret || btrfs_mixed_space_info(space_info)) {
10020                         btrfs_put_block_group(block_group);
10021                         continue;
10022                 }
10023                 spin_unlock(&fs_info->unused_bgs_lock);
10024
10025                 mutex_lock(&root->fs_info->delete_unused_bgs_mutex);
10026
10027                 /* Don't want to race with allocators so take the groups_sem */
10028                 down_write(&space_info->groups_sem);
10029                 spin_lock(&block_group->lock);
10030                 if (block_group->reserved ||
10031                     btrfs_block_group_used(&block_group->item) ||
10032                     block_group->ro) {
10033                         /*
10034                          * We want to bail if we made new allocations or have
10035                          * outstanding allocations in this block group.  We do
10036                          * the ro check in case balance is currently acting on
10037                          * this block group.
10038                          */
10039                         spin_unlock(&block_group->lock);
10040                         up_write(&space_info->groups_sem);
10041                         goto next;
10042                 }
10043                 spin_unlock(&block_group->lock);
10044
10045                 /* We don't want to force the issue, only flip if it's ok. */
10046                 ret = inc_block_group_ro(block_group, 0);
10047                 up_write(&space_info->groups_sem);
10048                 if (ret < 0) {
10049                         ret = 0;
10050                         goto next;
10051                 }
10052
10053                 /*
10054                  * Want to do this before we do anything else so we can recover
10055                  * properly if we fail to join the transaction.
10056                  */
10057                 /* 1 for btrfs_orphan_reserve_metadata() */
10058                 trans = btrfs_start_transaction(root, 1);
10059                 if (IS_ERR(trans)) {
10060                         btrfs_dec_block_group_ro(root, block_group);
10061                         ret = PTR_ERR(trans);
10062                         goto next;
10063                 }
10064
10065                 /*
10066                  * We could have pending pinned extents for this block group,
10067                  * just delete them, we don't care about them anymore.
10068                  */
10069                 start = block_group->key.objectid;
10070                 end = start + block_group->key.offset - 1;
10071                 /*
10072                  * Hold the unused_bg_unpin_mutex lock to avoid racing with
10073                  * btrfs_finish_extent_commit(). If we are at transaction N,
10074                  * another task might be running finish_extent_commit() for the
10075                  * previous transaction N - 1, and have seen a range belonging
10076                  * to the block group in freed_extents[] before we were able to
10077                  * clear the whole block group range from freed_extents[]. This
10078                  * means that task can look up the block group after we
10079                  * unpinned it from freed_extents[] and removed it, leading to
10080                  * a BUG_ON() at btrfs_unpin_extent_range().
10081                  */
10082                 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10083                 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10084                                   EXTENT_DIRTY, GFP_NOFS);
10085                 if (ret) {
10086                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10087                         btrfs_dec_block_group_ro(root, block_group);
10088                         goto end_trans;
10089                 }
10090                 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10091                                   EXTENT_DIRTY, GFP_NOFS);
10092                 if (ret) {
10093                         mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10094                         btrfs_dec_block_group_ro(root, block_group);
10095                         goto end_trans;
10096                 }
10097                 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10098
10099                 /* Reset pinned so btrfs_put_block_group doesn't complain */
10100                 spin_lock(&space_info->lock);
10101                 spin_lock(&block_group->lock);
10102
10103                 space_info->bytes_pinned -= block_group->pinned;
10104                 space_info->bytes_readonly += block_group->pinned;
10105                 percpu_counter_add(&space_info->total_bytes_pinned,
10106                                    -block_group->pinned);
10107                 block_group->pinned = 0;
10108
10109                 spin_unlock(&block_group->lock);
10110                 spin_unlock(&space_info->lock);
10111
10112                 /* DISCARD can flip during remount */
10113                 trimming = btrfs_test_opt(root, DISCARD);
10114
10115                 /* Implicit trim during transaction commit. */
10116                 if (trimming)
10117                         btrfs_get_block_group_trimming(block_group);
10118
10119                 /*
10120                  * btrfs_remove_chunk() will abort the transaction if things go
10121                  * horribly wrong.
10122                  */
10123                 ret = btrfs_remove_chunk(trans, root,
10124                                          block_group->key.objectid);
10125
10126                 if (ret) {
10127                         if (trimming)
10128                                 btrfs_put_block_group_trimming(block_group);
10129                         goto end_trans;
10130                 }
10131
10132                 /*
10133                  * If we're not mounted with -odiscard, we can just forget
10134                  * about this block group. Otherwise we'll need to wait
10135                  * until transaction commit to do the actual discard.
10136                  */
10137                 if (trimming) {
10138                         WARN_ON(!list_empty(&block_group->bg_list));
10139                         spin_lock(&trans->transaction->deleted_bgs_lock);
10140                         list_move(&block_group->bg_list,
10141                                   &trans->transaction->deleted_bgs);
10142                         spin_unlock(&trans->transaction->deleted_bgs_lock);
10143                         btrfs_get_block_group(block_group);
10144                 }
10145 end_trans:
10146                 btrfs_end_transaction(trans, root);
10147 next:
10148                 mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
10149                 btrfs_put_block_group(block_group);
10150                 spin_lock(&fs_info->unused_bgs_lock);
10151         }
10152         spin_unlock(&fs_info->unused_bgs_lock);
10153 }
10154
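/*
 * Create the initial empty space_info structures (system, plus either a
 * mixed data+metadata one or separate data and metadata ones, depending
 * on the MIXED_GROUPS feature) so later allocations always find their
 * space_info.
 */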
10155 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10156 {
10157         struct btrfs_space_info *space_info;
10158         struct btrfs_super_block *disk_super;
10159         u64 features;
10160         u64 flags;
10161         int mixed = 0;
10162         int ret;
10163
10164         disk_super = fs_info->super_copy;
10165         if (!btrfs_super_root(disk_super))
10166                 return 1;
10167
10168         features = btrfs_super_incompat_flags(disk_super);
10169         if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10170                 mixed = 1;
10171
10172         flags = BTRFS_BLOCK_GROUP_SYSTEM;
10173         ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10174         if (ret)
10175                 goto out;
10176
10177         if (mixed) {
10178                 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10179                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10180         } else {
10181                 flags = BTRFS_BLOCK_GROUP_METADATA;
10182                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10183                 if (ret)
10184                         goto out;
10185
10186                 flags = BTRFS_BLOCK_GROUP_DATA;
10187                 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10188         }
10189 out:
10190         return ret;
10191 }
10192
10193 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10194 {
10195         return unpin_extent_range(root, start, end, false);
10196 }
10197
10198 /*
10199  * It used to be that old block groups would be left around forever.
10200  * Iterating over them would be enough to trim unused space.  Since we
10201  * now automatically remove them, we also need to iterate over unallocated
10202  * space.
10203  *
10204  * We don't want a transaction for this since the discard may take a
10205  * substantial amount of time.  We don't require that a transaction be
10206  * running, but we do need to take a running transaction into account
10207  * to ensure that we're not discarding chunks that were released in
10208  * the current transaction.
10209  *
10210  * Holding the chunks lock will prevent other threads from allocating
10211  * or releasing chunks, but it won't prevent a running transaction
10212  * from committing and releasing the memory that the pending chunks
10213  * list head uses.  For that, we need to take a reference to the
10214  * transaction.
10215  */
10216 static int btrfs_trim_free_extents(struct btrfs_device *device,
10217                                    u64 minlen, u64 *trimmed)
10218 {
10219         u64 start = 0, len = 0;
10220         int ret;
10221
10222         *trimmed = 0;
10223
10224         /* Not writeable = nothing to do. */
10225         if (!device->writeable)
10226                 return 0;
10227
10228         /* No free space = nothing to do. */
10229         if (device->total_bytes <= device->bytes_used)
10230                 return 0;
10231
10232         ret = 0;
10233
10234         while (1) {
10235                 struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
10236                 struct btrfs_transaction *trans;
10237                 u64 bytes;
10238
10239                 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
10240                 if (ret)
10241                         return ret;
10242
10243                 down_read(&fs_info->commit_root_sem);
10244
10245                 spin_lock(&fs_info->trans_lock);
10246                 trans = fs_info->running_transaction;
10247                 if (trans)
10248                         atomic_inc(&trans->use_count);
10249                 spin_unlock(&fs_info->trans_lock);
10250
10251                 ret = find_free_dev_extent_start(trans, device, minlen, start,
10252                                                  &start, &len);
10253                 if (trans)
10254                         btrfs_put_transaction(trans);
10255
10256                 if (ret) {
10257                         up_read(&fs_info->commit_root_sem);
10258                         mutex_unlock(&fs_info->chunk_mutex);
10259                         if (ret == -ENOSPC)
10260                                 ret = 0;
10261                         break;
10262                 }
10263
10264                 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
10265                 up_read(&fs_info->commit_root_sem);
10266                 mutex_unlock(&fs_info->chunk_mutex);
10267
10268                 if (ret)
10269                         break;
10270
10271                 start += len;
10272                 *trimmed += bytes;
10273
10274                 if (fatal_signal_pending(current)) {
10275                         ret = -ERESTARTSYS;
10276                         break;
10277                 }
10278
10279                 cond_resched();
10280         }
10281
10282         return ret;
10283 }
10284
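/*
 * FITRIM entry point: discard the free space of every block group that
 * intersects [range->start, range->start + range->len), then discard the
 * unallocated space of each writeable device.  On return, range->len is
 * set to the total number of bytes trimmed.
 *
 * Hedged usage sketch (illustrative, mirroring what an ioctl handler
 * might pass in):
 *
 *      struct fstrim_range range = {
 *              .start  = 0,
 *              .len    = (u64)-1,      // whole filesystem
 *              .minlen = 0,
 *      };
 *      ret = btrfs_trim_fs(fs_info->tree_root, &range);
 */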
10285 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
10286 {
10287         struct btrfs_fs_info *fs_info = root->fs_info;
10288         struct btrfs_block_group_cache *cache = NULL;
10289         struct btrfs_device *device;
10290         struct list_head *devices;
10291         u64 group_trimmed;
10292         u64 start;
10293         u64 end;
10294         u64 trimmed = 0;
10295         u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
10296         int ret = 0;
10297
10298         /*
10299          * Try to trim all FS space; our first block group may start at a non-zero offset.
10300          */
10301         if (range->len == total_bytes)
10302                 cache = btrfs_lookup_first_block_group(fs_info, range->start);
10303         else
10304                 cache = btrfs_lookup_block_group(fs_info, range->start);
10305
10306         while (cache) {
10307                 if (cache->key.objectid >= (range->start + range->len)) {
10308                         btrfs_put_block_group(cache);
10309                         break;
10310                 }
10311
10312                 start = max(range->start, cache->key.objectid);
10313                 end = min(range->start + range->len,
10314                                 cache->key.objectid + cache->key.offset);
10315
10316                 if (end - start >= range->minlen) {
10317                         if (!block_group_cache_done(cache)) {
10318                                 ret = cache_block_group(cache, 0);
10319                                 if (ret) {
10320                                         btrfs_put_block_group(cache);
10321                                         break;
10322                                 }
10323                                 ret = wait_block_group_cache_done(cache);
10324                                 if (ret) {
10325                                         btrfs_put_block_group(cache);
10326                                         break;
10327                                 }
10328                         }
10329                         ret = btrfs_trim_block_group(cache,
10330                                                      &group_trimmed,
10331                                                      start,
10332                                                      end,
10333                                                      range->minlen);
10334
10335                         trimmed += group_trimmed;
10336                         if (ret) {
10337                                 btrfs_put_block_group(cache);
10338                                 break;
10339                         }
10340                 }
10341
10342                 cache = next_block_group(fs_info->tree_root, cache);
10343         }
10344
10345         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
10346         devices = &root->fs_info->fs_devices->alloc_list;
10347         list_for_each_entry(device, devices, dev_alloc_list) {
10348                 ret = btrfs_trim_free_extents(device, range->minlen,
10349                                               &group_trimmed);
10350                 if (ret)
10351                         break;
10352
10353                 trimmed += group_trimmed;
10354         }
10355         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
10356
10357         range->len = trimmed;
10358         return ret;
10359 }
10360
10361 /*
10362  * btrfs_{start,end}_write_no_snapshoting() are similar to
10363  * mnt_{want,drop}_write(): they are used to prevent some tasks from writing
10364  * data into the page cache through nocow before the subvolume is snapshotted
10365  * and then flushing it to disk only after the snapshot is created, and to
10366  * prevent operations while a snapshot is being taken that would leave the
10367  * snapshot inconsistent (writes followed by expanding truncates, for example).
10368  */
10369 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
10370 {
10371         percpu_counter_dec(&root->subv_writers->counter);
10372         /*
10373          * Make sure counter is updated before we wake up
10374          * waiters.
10375          */
10376         smp_mb();
10377         if (waitqueue_active(&root->subv_writers->wait))
10378                 wake_up(&root->subv_writers->wait);
10379 }
10380
10381 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
10382 {
10383         if (atomic_read(&root->will_be_snapshoted))
10384                 return 0;
10385
10386         percpu_counter_inc(&root->subv_writers->counter);
10387         /*
10388          * Make sure counter is updated before we check for snapshot creation.
10389          */
10390         smp_mb();
10391         if (atomic_read(&root->will_be_snapshoted)) {
10392                 btrfs_end_write_no_snapshoting(root);
10393                 return 0;
10394         }
10395         return 1;
10396 }
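
/*
 * Hedged usage sketch (illustrative; actual callers live elsewhere in
 * btrfs): a nocow writer brackets its page cache writes with the pair
 * above, e.g.
 *
 *      if (!btrfs_start_write_no_snapshoting(root))
 *              return 0;       // snapshot pending, caller falls back to cow
 *      ...do the nocow write...
 *      btrfs_end_write_no_snapshoting(root);
 */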