/* fs/btrfs/free-space-cache.c */
/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP         (PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)

static int link_free_space(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info);

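/*
 * Look up the free space inode for a given offset: find the free space
 * header item (objectid BTRFS_FREE_SPACE_OBJECTID, offset of the block
 * group) and resolve the inode key stored inside it.
 */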
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
                                               struct btrfs_path *path,
                                               u64 offset)
{
        struct btrfs_key key;
        struct btrfs_key location;
        struct btrfs_disk_key disk_key;
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
        struct inode *inode = NULL;
        int ret;

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                return ERR_PTR(ret);
        if (ret > 0) {
                btrfs_release_path(path);
                return ERR_PTR(-ENOENT);
        }

        leaf = path->nodes[0];
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        btrfs_free_space_key(leaf, header, &disk_key);
        btrfs_disk_key_to_cpu(&location, &disk_key);
        btrfs_release_path(path);

        inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
        if (!inode)
                return ERR_PTR(-ENOENT);
        if (IS_ERR(inode))
                return inode;
        if (is_bad_inode(inode)) {
                iput(inode);
                return ERR_PTR(-ENOENT);
        }

        mapping_set_gfp_mask(inode->i_mapping,
                        mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

        return inode;
}

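/*
 * Return the cached inode for this block group if we already hold one,
 * otherwise look it up on disk and stash a reference in the block group.
 * Old style space inodes (missing NODATASUM/NODATACOW) are converted and
 * the on-disk cache is marked for a rewrite.
 */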
struct inode *lookup_free_space_inode(struct btrfs_root *root,
                                      struct btrfs_block_group_cache *block_group,
                                      struct btrfs_path *path)
{
        struct inode *inode = NULL;
        u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

        spin_lock(&block_group->lock);
        if (block_group->inode)
                inode = igrab(block_group->inode);
        spin_unlock(&block_group->lock);
        if (inode)
                return inode;

        inode = __lookup_free_space_inode(root, path,
                                          block_group->key.objectid);
        if (IS_ERR(inode))
                return inode;

        spin_lock(&block_group->lock);
        if ((BTRFS_I(inode)->flags & flags) != flags) {
                printk(KERN_INFO "Old style space inode found, converting.\n");
                BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
                        BTRFS_INODE_NODATACOW;
                block_group->disk_cache_state = BTRFS_DC_CLEAR;
        }

        if (!block_group->iref) {
                block_group->inode = igrab(inode);
                block_group->iref = 1;
        }
        spin_unlock(&block_group->lock);

        return inode;
}

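/*
 * Create the inode item and the free space header item that describe a
 * free space cache at the given offset.  The inode is a plain 0600
 * regular file that never gets a directory entry, so it is invisible to
 * userspace.
 */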
int __create_free_space_inode(struct btrfs_root *root,
                              struct btrfs_trans_handle *trans,
                              struct btrfs_path *path, u64 ino, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_disk_key disk_key;
        struct btrfs_free_space_header *header;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
        u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
        int ret;

        ret = btrfs_insert_empty_inode(trans, root, path, ino);
        if (ret)
                return ret;

        /* We inline crc's for the free disk space cache */
        if (ino != BTRFS_FREE_INO_OBJECTID)
                flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

        leaf = path->nodes[0];
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
                                    struct btrfs_inode_item);
        btrfs_item_key(leaf, &disk_key, path->slots[0]);
        memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
                             sizeof(*inode_item));
        btrfs_set_inode_generation(leaf, inode_item, trans->transid);
        btrfs_set_inode_size(leaf, inode_item, 0);
        btrfs_set_inode_nbytes(leaf, inode_item, 0);
        btrfs_set_inode_uid(leaf, inode_item, 0);
        btrfs_set_inode_gid(leaf, inode_item, 0);
        btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
        btrfs_set_inode_flags(leaf, inode_item, flags);
        btrfs_set_inode_nlink(leaf, inode_item, 1);
        btrfs_set_inode_transid(leaf, inode_item, trans->transid);
        btrfs_set_inode_block_group(leaf, inode_item, offset);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(struct btrfs_free_space_header));
        if (ret < 0) {
                btrfs_release_path(path);
                return ret;
        }
        leaf = path->nodes[0];
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
        btrfs_set_free_space_key(leaf, header, &disk_key);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        return 0;
}

int create_free_space_inode(struct btrfs_root *root,
                            struct btrfs_trans_handle *trans,
                            struct btrfs_block_group_cache *block_group,
                            struct btrfs_path *path)
{
        int ret;
        u64 ino;

        ret = btrfs_find_free_objectid(root, &ino);
        if (ret < 0)
                return ret;

        return __create_free_space_inode(root, trans, path, ino,
                                         block_group->key.objectid);
}

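/*
 * Truncate the free space cache inode back to zero so it can be rewritten.
 * The metadata reservation is temporarily borrowed from the global block
 * reserve, so check up front that enough space is actually reserved.
 */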
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
                                    struct btrfs_trans_handle *trans,
                                    struct btrfs_path *path,
                                    struct inode *inode)
{
        struct btrfs_block_rsv *rsv;
        u64 needed_bytes;
        loff_t oldsize;
        int ret = 0;

        rsv = trans->block_rsv;
        trans->block_rsv = &root->fs_info->global_block_rsv;

        /* 1 for slack space, 1 for updating the inode */
        needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
                btrfs_calc_trans_metadata_size(root, 1);

        spin_lock(&trans->block_rsv->lock);
        if (trans->block_rsv->reserved < needed_bytes) {
                spin_unlock(&trans->block_rsv->lock);
                trans->block_rsv = rsv;
                return -ENOSPC;
        }
        spin_unlock(&trans->block_rsv->lock);

        oldsize = i_size_read(inode);
        btrfs_i_size_write(inode, 0);
        truncate_pagecache(inode, oldsize, 0);

        /*
         * We don't need an orphan item because truncating the free space cache
         * will never be split across transactions.
         */
        ret = btrfs_truncate_inode_items(trans, root, inode,
                                         0, BTRFS_EXTENT_DATA_KEY);

        if (ret) {
                trans->block_rsv = rsv;
                btrfs_abort_transaction(trans, root, ret);
                return ret;
        }

        ret = btrfs_update_inode(trans, root, inode);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
        trans->block_rsv = rsv;

        return ret;
}

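/* Kick off readahead for the whole cache file before we start reading it. */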
static int readahead_cache(struct inode *inode)
{
        struct file_ra_state *ra;
        unsigned long last_index;

        ra = kzalloc(sizeof(*ra), GFP_NOFS);
        if (!ra)
                return -ENOMEM;

        file_ra_state_init(ra, inode->i_mapping);
        last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

        page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

        kfree(ra);

        return 0;
}

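/*
 * io_ctl is a small helper for walking the pages of the space cache file.
 * The on-disk layout it implements: the first page starts with the crc
 * area (one u32 per page), or a single reserved u64 when crcs are not
 * used, followed by the u64 cache generation, then the packed free space
 * entries; each bitmap then occupies one full page of its own.
 */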
struct io_ctl {
        void *cur, *orig;
        struct page *page;
        struct page **pages;
        struct btrfs_root *root;
        unsigned long size;
        int index;
        int num_pages;
        unsigned check_crcs:1;
};

static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
                       struct btrfs_root *root)
{
        memset(io_ctl, 0, sizeof(struct io_ctl));
        io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
                PAGE_CACHE_SHIFT;
        io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
                                GFP_NOFS);
        if (!io_ctl->pages)
                return -ENOMEM;
        io_ctl->root = root;
        if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
                io_ctl->check_crcs = 1;
        return 0;
}

static void io_ctl_free(struct io_ctl *io_ctl)
{
        kfree(io_ctl->pages);
}

static void io_ctl_unmap_page(struct io_ctl *io_ctl)
{
        if (io_ctl->cur) {
                kunmap(io_ctl->page);
                io_ctl->cur = NULL;
                io_ctl->orig = NULL;
        }
}

static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
        BUG_ON(io_ctl->index >= io_ctl->num_pages);
        io_ctl->page = io_ctl->pages[io_ctl->index++];
        io_ctl->cur = kmap(io_ctl->page);
        io_ctl->orig = io_ctl->cur;
        io_ctl->size = PAGE_CACHE_SIZE;
        if (clear)
                memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}

static void io_ctl_drop_pages(struct io_ctl *io_ctl)
{
        int i;

        io_ctl_unmap_page(io_ctl);

        for (i = 0; i < io_ctl->num_pages; i++) {
                if (io_ctl->pages[i]) {
                        ClearPageChecked(io_ctl->pages[i]);
                        unlock_page(io_ctl->pages[i]);
                        page_cache_release(io_ctl->pages[i]);
                }
        }
}

static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
                                int uptodate)
{
        struct page *page;
        gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
        int i;

        for (i = 0; i < io_ctl->num_pages; i++) {
                page = find_or_create_page(inode->i_mapping, i, mask);
                if (!page) {
                        io_ctl_drop_pages(io_ctl);
                        return -ENOMEM;
                }
                io_ctl->pages[i] = page;
                if (uptodate && !PageUptodate(page)) {
                        btrfs_readpage(NULL, page);
                        lock_page(page);
                        if (!PageUptodate(page)) {
                                printk(KERN_ERR "btrfs: error reading free "
                                       "space cache\n");
                                io_ctl_drop_pages(io_ctl);
                                return -EIO;
                        }
                }
        }

        for (i = 0; i < io_ctl->num_pages; i++) {
                clear_page_dirty_for_io(io_ctl->pages[i]);
                set_page_extent_mapped(io_ctl->pages[i]);
        }

        return 0;
}

static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
{
        __le64 *val;

        io_ctl_map_page(io_ctl, 1);

        /*
         * Skip the csum areas.  If we don't check crcs then we just have a
         * 64bit chunk at the front of the first page.
         */
        if (io_ctl->check_crcs) {
                io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
                io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
        } else {
                io_ctl->cur += sizeof(u64);
                io_ctl->size -= sizeof(u64) * 2;
        }

        val = io_ctl->cur;
        *val = cpu_to_le64(generation);
        io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
{
        __le64 *gen;

        /*
         * Skip the crc area.  If we don't check crcs then we just have a 64bit
         * chunk at the front of the first page.
         */
        if (io_ctl->check_crcs) {
                io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
                io_ctl->size -= sizeof(u64) +
                        (sizeof(u32) * io_ctl->num_pages);
        } else {
                io_ctl->cur += sizeof(u64);
                io_ctl->size -= sizeof(u64) * 2;
        }

        gen = io_ctl->cur;
        if (le64_to_cpu(*gen) != generation) {
                printk_ratelimited(KERN_ERR "btrfs: space cache generation "
                                   "(%Lu) does not match inode (%Lu)\n",
                                   le64_to_cpu(*gen), generation);
                io_ctl_unmap_page(io_ctl);
                return -EIO;
        }
        io_ctl->cur += sizeof(u64);
        return 0;
}

static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
{
        u32 *tmp;
        u32 crc = ~(u32)0;
        unsigned offset = 0;

        if (!io_ctl->check_crcs) {
                io_ctl_unmap_page(io_ctl);
                return;
        }

        if (index == 0)
                offset = sizeof(u32) * io_ctl->num_pages;

        crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
                              PAGE_CACHE_SIZE - offset);
        btrfs_csum_final(crc, (char *)&crc);
        io_ctl_unmap_page(io_ctl);
        tmp = kmap(io_ctl->pages[0]);
        tmp += index;
        *tmp = crc;
        kunmap(io_ctl->pages[0]);
}

static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
{
        u32 *tmp, val;
        u32 crc = ~(u32)0;
        unsigned offset = 0;

        if (!io_ctl->check_crcs) {
                io_ctl_map_page(io_ctl, 0);
                return 0;
        }

        if (index == 0)
                offset = sizeof(u32) * io_ctl->num_pages;

        tmp = kmap(io_ctl->pages[0]);
        tmp += index;
        val = *tmp;
        kunmap(io_ctl->pages[0]);

        io_ctl_map_page(io_ctl, 0);
        crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
                              PAGE_CACHE_SIZE - offset);
        btrfs_csum_final(crc, (char *)&crc);
        if (val != crc) {
                printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free "
                                   "space cache\n");
                io_ctl_unmap_page(io_ctl);
                return -EIO;
        }

        return 0;
}

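/*
 * Pack one free space entry into the current page, crc'ing the page and
 * moving on to the next one when it fills up.
 */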
static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
                            void *bitmap)
{
        struct btrfs_free_space_entry *entry;

        if (!io_ctl->cur)
                return -ENOSPC;

        entry = io_ctl->cur;
        entry->offset = cpu_to_le64(offset);
        entry->bytes = cpu_to_le64(bytes);
        entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
                BTRFS_FREE_SPACE_EXTENT;
        io_ctl->cur += sizeof(struct btrfs_free_space_entry);
        io_ctl->size -= sizeof(struct btrfs_free_space_entry);

        if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
                return 0;

        io_ctl_set_crc(io_ctl, io_ctl->index - 1);

        /* No more pages to map */
        if (io_ctl->index >= io_ctl->num_pages)
                return 0;

        /* map the next page */
        io_ctl_map_page(io_ctl, 1);
        return 0;
}

static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
{
        if (!io_ctl->cur)
                return -ENOSPC;

        /*
         * If we aren't at the start of the current page, unmap this one and
         * map the next one if there is any left.
         */
        if (io_ctl->cur != io_ctl->orig) {
                io_ctl_set_crc(io_ctl, io_ctl->index - 1);
                if (io_ctl->index >= io_ctl->num_pages)
                        return -ENOSPC;
                io_ctl_map_page(io_ctl, 0);
        }

        memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
        io_ctl_set_crc(io_ctl, io_ctl->index - 1);
        if (io_ctl->index < io_ctl->num_pages)
                io_ctl_map_page(io_ctl, 0);
        return 0;
}

static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
{
        /*
         * If we're not on the boundary we know we've modified the page and we
         * need to crc the page.
         */
        if (io_ctl->cur != io_ctl->orig)
                io_ctl_set_crc(io_ctl, io_ctl->index - 1);
        else
                io_ctl_unmap_page(io_ctl);

        while (io_ctl->index < io_ctl->num_pages) {
                io_ctl_map_page(io_ctl, 1);
                io_ctl_set_crc(io_ctl, io_ctl->index - 1);
        }
}

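/*
 * Read one packed entry; the crc of each page is verified lazily when the
 * page is first touched (io_ctl->cur is NULL between pages).
 */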
static int io_ctl_read_entry(struct io_ctl *io_ctl,
                            struct btrfs_free_space *entry, u8 *type)
{
        struct btrfs_free_space_entry *e;
        int ret;

        if (!io_ctl->cur) {
                ret = io_ctl_check_crc(io_ctl, io_ctl->index);
                if (ret)
                        return ret;
        }

        e = io_ctl->cur;
        entry->offset = le64_to_cpu(e->offset);
        entry->bytes = le64_to_cpu(e->bytes);
        *type = e->type;
        io_ctl->cur += sizeof(struct btrfs_free_space_entry);
        io_ctl->size -= sizeof(struct btrfs_free_space_entry);

        if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
                return 0;

        io_ctl_unmap_page(io_ctl);

        return 0;
}

static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
                              struct btrfs_free_space *entry)
{
        int ret;

        ret = io_ctl_check_crc(io_ctl, io_ctl->index);
        if (ret)
                return ret;

        memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
        io_ctl_unmap_page(io_ctl);

        return 0;
}

/*
 * Since we attach pinned extents after the fact we can have contiguous
 * sections of free space that are split up in entries.  This poses a problem
 * for tree logging, which could have allocated across what now appears to be
 * two entries, because the entries would have been merged when the pinned
 * extents were added back to the free space cache.  So run through the space
 * cache that we just loaded and merge contiguous entries.  This will make the
 * log replay stuff not blow up and it will make for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
        struct btrfs_free_space *e, *prev = NULL;
        struct rb_node *n;

again:
        spin_lock(&ctl->tree_lock);
        for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
                e = rb_entry(n, struct btrfs_free_space, offset_index);
                if (!prev)
                        goto next;
                if (e->bitmap || prev->bitmap)
                        goto next;
                if (prev->offset + prev->bytes == e->offset) {
                        unlink_free_space(ctl, prev);
                        unlink_free_space(ctl, e);
                        prev->bytes += e->bytes;
                        kmem_cache_free(btrfs_free_space_cachep, e);
                        link_free_space(ctl, prev);
                        prev = NULL;
                        spin_unlock(&ctl->tree_lock);
                        goto again;
                }
next:
                prev = e;
        }
        spin_unlock(&ctl->tree_lock);
}

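/*
 * Read a free space cache file into the in-memory free space ctl.  Returns
 * 1 if the cache was loaded, 0 if there was nothing usable (the caller then
 * falls back to scanning the block group), and a negative value on error.
 */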
int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
                            struct btrfs_free_space_ctl *ctl,
                            struct btrfs_path *path, u64 offset)
{
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
        struct io_ctl io_ctl;
        struct btrfs_key key;
        struct btrfs_free_space *e, *n;
        struct list_head bitmaps;
        u64 num_entries;
        u64 num_bitmaps;
        u64 generation;
        u8 type;
        int ret = 0;

        INIT_LIST_HEAD(&bitmaps);

        /* Nothing in the space cache, goodbye */
        if (!i_size_read(inode))
                return 0;

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                return 0;
        else if (ret > 0) {
                btrfs_release_path(path);
                return 0;
        }

        ret = -1;

        leaf = path->nodes[0];
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        num_entries = btrfs_free_space_entries(leaf, header);
        num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
        generation = btrfs_free_space_generation(leaf, header);
        btrfs_release_path(path);

        if (BTRFS_I(inode)->generation != generation) {
                printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
                       " not match free space cache generation (%llu)\n",
                       (unsigned long long)BTRFS_I(inode)->generation,
                       (unsigned long long)generation);
                return 0;
        }

        if (!num_entries)
                return 0;

        ret = io_ctl_init(&io_ctl, inode, root);
        if (ret)
                return ret;

        ret = readahead_cache(inode);
        if (ret)
                goto out;

        ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
        if (ret)
                goto out;

        ret = io_ctl_check_crc(&io_ctl, 0);
        if (ret)
                goto free_cache;

        ret = io_ctl_check_generation(&io_ctl, generation);
        if (ret)
                goto free_cache;

        while (num_entries) {
                e = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
                if (!e) {
                        ret = -ENOMEM;
                        goto free_cache;
                }

                ret = io_ctl_read_entry(&io_ctl, e, &type);
                if (ret) {
                        kmem_cache_free(btrfs_free_space_cachep, e);
                        goto free_cache;
                }

                if (!e->bytes) {
                        ret = -1;
                        kmem_cache_free(btrfs_free_space_cachep, e);
                        goto free_cache;
                }

                if (type == BTRFS_FREE_SPACE_EXTENT) {
                        spin_lock(&ctl->tree_lock);
                        ret = link_free_space(ctl, e);
                        spin_unlock(&ctl->tree_lock);
                        if (ret) {
                                printk(KERN_ERR "Duplicate entries in "
                                       "free space cache, dumping\n");
                                kmem_cache_free(btrfs_free_space_cachep, e);
                                goto free_cache;
                        }
                } else {
                        BUG_ON(!num_bitmaps);
                        num_bitmaps--;
                        e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
                        if (!e->bitmap) {
                                ret = -ENOMEM;
                                kmem_cache_free(btrfs_free_space_cachep, e);
                                goto free_cache;
                        }
                        spin_lock(&ctl->tree_lock);
                        ret = link_free_space(ctl, e);
                        ctl->total_bitmaps++;
                        ctl->op->recalc_thresholds(ctl);
                        spin_unlock(&ctl->tree_lock);
                        if (ret) {
                                printk(KERN_ERR "Duplicate entries in "
                                       "free space cache, dumping\n");
                                kfree(e->bitmap);
                                kmem_cache_free(btrfs_free_space_cachep, e);
                                goto free_cache;
                        }
                        list_add_tail(&e->list, &bitmaps);
                }

                num_entries--;
        }

        io_ctl_unmap_page(&io_ctl);

        /*
         * We add the bitmaps at the end of the entries in the order that the
         * bitmap entries were added to the cache.
         */
        list_for_each_entry_safe(e, n, &bitmaps, list) {
                list_del_init(&e->list);
                ret = io_ctl_read_bitmap(&io_ctl, e);
                if (ret)
                        goto free_cache;
        }

        io_ctl_drop_pages(&io_ctl);
        merge_space_tree(ctl);
        ret = 1;
out:
        io_ctl_free(&io_ctl);
        return ret;
free_cache:
        io_ctl_drop_pages(&io_ctl);
        __btrfs_remove_free_space_cache(ctl);
        goto out;
}

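/*
 * Load the free space cache for a block group and cross-check the loaded
 * total against the used bytes recorded in the block group item; a
 * mismatch means the cache is stale, so it is thrown away and marked to
 * be rebuilt.
 */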
int load_free_space_cache(struct btrfs_fs_info *fs_info,
                          struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_root *root = fs_info->tree_root;
        struct inode *inode;
        struct btrfs_path *path;
        int ret = 0;
        bool matched;
        u64 used = btrfs_block_group_used(&block_group->item);

        /*
         * If this block group has been marked to be cleared for one reason or
         * another then we can't trust the on disk cache, so just return.
         */
        spin_lock(&block_group->lock);
        if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
                spin_unlock(&block_group->lock);
                return 0;
        }
        spin_unlock(&block_group->lock);

        path = btrfs_alloc_path();
        if (!path)
                return 0;
        path->search_commit_root = 1;
        path->skip_locking = 1;

        inode = lookup_free_space_inode(root, block_group, path);
        if (IS_ERR(inode)) {
                btrfs_free_path(path);
                return 0;
        }

        /* We may have converted the inode and made the cache invalid. */
        spin_lock(&block_group->lock);
        if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
                spin_unlock(&block_group->lock);
                btrfs_free_path(path);
                goto out;
        }
        spin_unlock(&block_group->lock);

        ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
                                      path, block_group->key.objectid);
        btrfs_free_path(path);
        if (ret <= 0)
                goto out;

        spin_lock(&ctl->tree_lock);
        matched = (ctl->free_space == (block_group->key.offset - used -
                                       block_group->bytes_super));
        spin_unlock(&ctl->tree_lock);

        if (!matched) {
                __btrfs_remove_free_space_cache(ctl);
                printk(KERN_ERR "btrfs: block group %llu has the wrong "
                       "amount of free space\n", block_group->key.objectid);
                ret = -1;
        }
out:
        if (ret < 0) {
                /* This cache is bogus, make sure it gets cleared */
                spin_lock(&block_group->lock);
                block_group->disk_cache_state = BTRFS_DC_CLEAR;
                spin_unlock(&block_group->lock);
                ret = 0;

                printk(KERN_ERR "btrfs: failed to load free space cache "
                       "for block group %llu\n", block_group->key.objectid);
        }

        iput(inode);
        return ret;
}

/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root: the root the inode belongs to
 * @inode: the inode the cache is written into
 * @ctl: the free space cache we are going to write out
 * @block_group: the block_group for this cache if it belongs to a block_group
 * @trans: the trans handle
 * @path: the path to use
 * @offset: the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick
 * recovery on mount.  This will return 0 if it was successful in writing the
 * cache out, and -1 if it was not.
 */
int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                            struct btrfs_free_space_ctl *ctl,
                            struct btrfs_block_group_cache *block_group,
                            struct btrfs_trans_handle *trans,
                            struct btrfs_path *path, u64 offset)
{
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
        struct rb_node *node;
        struct list_head *pos, *n;
        struct extent_state *cached_state = NULL;
        struct btrfs_free_cluster *cluster = NULL;
        struct extent_io_tree *unpin = NULL;
        struct io_ctl io_ctl;
        struct list_head bitmap_list;
        struct btrfs_key key;
        u64 start, extent_start, extent_end, len;
        int entries = 0;
        int bitmaps = 0;
        int ret;
        int err = -1;

        INIT_LIST_HEAD(&bitmap_list);

        if (!i_size_read(inode))
                return -1;

        ret = io_ctl_init(&io_ctl, inode, root);
        if (ret)
                return -1;

        /* Get the cluster for this block_group if it exists */
        if (block_group && !list_empty(&block_group->cluster_list))
                cluster = list_entry(block_group->cluster_list.next,
                                     struct btrfs_free_cluster,
                                     block_group_list);

        /* Lock all pages first so we can lock the extent safely. */
        ret = io_ctl_prepare_pages(&io_ctl, inode, 0);
        if (ret)
                goto out;

        lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
                         0, &cached_state);

        node = rb_first(&ctl->free_space_offset);
        if (!node && cluster) {
                node = rb_first(&cluster->root);
                cluster = NULL;
        }

        /* Make sure we can fit our crcs into the first page */
        if (io_ctl.check_crcs &&
            (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) {
                WARN_ON(1);
                goto out_nospc;
        }

        io_ctl_set_generation(&io_ctl, trans->transid);

        /* Write out the extent entries */
        while (node) {
                struct btrfs_free_space *e;

                e = rb_entry(node, struct btrfs_free_space, offset_index);
                entries++;

                ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
                                       e->bitmap);
                if (ret)
                        goto out_nospc;

                if (e->bitmap) {
                        list_add_tail(&e->list, &bitmap_list);
                        bitmaps++;
                }
                node = rb_next(node);
                if (!node && cluster) {
                        node = rb_first(&cluster->root);
                        cluster = NULL;
                }
        }

        /*
         * We want to add any pinned extents to our free space cache
         * so we don't leak the space
         */

        /*
         * We shouldn't have switched the pinned extents yet so this is the
         * right one
         */
        unpin = root->fs_info->pinned_extents;

        if (block_group)
                start = block_group->key.objectid;

        while (block_group && (start < block_group->key.objectid +
                               block_group->key.offset)) {
                ret = find_first_extent_bit(unpin, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY, NULL);
                if (ret) {
                        ret = 0;
                        break;
                }

                /* This pinned extent is out of our range */
                if (extent_start >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                extent_start = max(extent_start, start);
                extent_end = min(block_group->key.objectid +
                                 block_group->key.offset, extent_end + 1);
                len = extent_end - extent_start;

                entries++;
                ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);
                if (ret)
                        goto out_nospc;

                start = extent_end;
        }

        /* Write out the bitmaps */
        list_for_each_safe(pos, n, &bitmap_list) {
                struct btrfs_free_space *entry =
                        list_entry(pos, struct btrfs_free_space, list);

                ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);
                if (ret)
                        goto out_nospc;
                list_del_init(&entry->list);
        }

        /* Zero out the rest of the pages just to make sure */
        io_ctl_zero_remaining_pages(&io_ctl);

        ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
                                0, i_size_read(inode), &cached_state);
        io_ctl_drop_pages(&io_ctl);
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
                             i_size_read(inode) - 1, &cached_state, GFP_NOFS);

        if (ret)
                goto out;

        btrfs_wait_ordered_range(inode, 0, (u64)-1);

        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
        key.offset = offset;
        key.type = 0;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret < 0) {
                clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
                                 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
                                 GFP_NOFS);
                goto out;
        }
        leaf = path->nodes[0];
        if (ret > 0) {
                struct btrfs_key found_key;
                BUG_ON(!path->slots[0]);
                path->slots[0]--;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
                    found_key.offset != offset) {
                        clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
                                         inode->i_size - 1,
                                         EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
                                         NULL, GFP_NOFS);
                        btrfs_release_path(path);
                        goto out;
                }
        }

        BTRFS_I(inode)->generation = trans->transid;
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        btrfs_set_free_space_entries(leaf, header, entries);
        btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
        btrfs_set_free_space_generation(leaf, header, trans->transid);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);

        err = 0;
out:
        io_ctl_free(&io_ctl);
        if (err) {
                invalidate_inode_pages2(inode->i_mapping);
                BTRFS_I(inode)->generation = 0;
        }
        btrfs_update_inode(trans, root, inode);
        return err;

out_nospc:
        list_for_each_safe(pos, n, &bitmap_list) {
                struct btrfs_free_space *entry =
                        list_entry(pos, struct btrfs_free_space, list);
                list_del_init(&entry->list);
        }
        io_ctl_drop_pages(&io_ctl);
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
                             i_size_read(inode) - 1, &cached_state, GFP_NOFS);
        goto out;
}

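/*
 * Write out the free space cache for a block group.  A failure here is not
 * fatal: the cache is simply marked BTRFS_DC_ERROR so it won't be trusted
 * on the next mount, and 0 is returned.
 */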
int btrfs_write_out_cache(struct btrfs_root *root,
                          struct btrfs_trans_handle *trans,
                          struct btrfs_block_group_cache *block_group,
                          struct btrfs_path *path)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct inode *inode;
        int ret = 0;

        root = root->fs_info->tree_root;

        spin_lock(&block_group->lock);
        if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
                spin_unlock(&block_group->lock);
                return 0;
        }
        spin_unlock(&block_group->lock);

        inode = lookup_free_space_inode(root, block_group, path);
        if (IS_ERR(inode))
                return 0;

        ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
                                      path, block_group->key.objectid);
        if (ret) {
                spin_lock(&block_group->lock);
                block_group->disk_cache_state = BTRFS_DC_ERROR;
                spin_unlock(&block_group->lock);
                ret = 0;
#ifdef DEBUG
                printk(KERN_ERR "btrfs: failed to write free space cache "
                       "for block group %llu\n", block_group->key.objectid);
#endif
        }

        iput(inode);
        return ret;
}

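/*
 * Helpers for converting between byte offsets and bit indexes inside a
 * bitmap entry.  Each bitmap covers BITS_PER_BITMAP units of ctl->unit
 * bytes, aligned to that granularity relative to ctl->start.
 */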
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
                                          u64 offset)
{
        BUG_ON(offset < bitmap_start);
        offset -= bitmap_start;
        return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
        return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
                                   u64 offset)
{
        u64 bitmap_start;
        u64 bytes_per_bitmap;

        bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
        bitmap_start = offset - ctl->start;
        bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
        bitmap_start *= bytes_per_bitmap;
        bitmap_start += ctl->start;

        return bitmap_start;
}

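/*
 * Insert a free space entry into the rbtree indexed by offset.  A bitmap
 * entry and an extent entry are allowed to share an offset; the comment
 * inside explains how that tie is broken.
 */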
static int tree_insert_offset(struct rb_root *root, u64 offset,
                              struct rb_node *node, int bitmap)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct btrfs_free_space *info;

        while (*p) {
                parent = *p;
                info = rb_entry(parent, struct btrfs_free_space, offset_index);

                if (offset < info->offset) {
                        p = &(*p)->rb_left;
                } else if (offset > info->offset) {
                        p = &(*p)->rb_right;
                } else {
                        /*
                         * we could have a bitmap entry and an extent entry
                         * share the same offset.  If this is the case, we want
                         * the extent entry to always be found first if we do a
                         * linear search through the tree, since we want to have
                         * the quickest allocation time, and allocating from an
                         * extent is faster than allocating from a bitmap.  So
                         * if we're inserting a bitmap and we find an entry at
                         * this offset, we want to go right, or after this entry
                         * logically.  If we are inserting an extent and we've
                         * found a bitmap, we want to go left, or before
                         * logically.
                         */
                        if (bitmap) {
                                if (info->bitmap) {
                                        WARN_ON_ONCE(1);
                                        return -EEXIST;
                                }
                                p = &(*p)->rb_right;
                        } else {
                                if (!info->bitmap) {
                                        WARN_ON_ONCE(1);
                                        return -EEXIST;
                                }
                                p = &(*p)->rb_left;
                        }
                }
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);

        return 0;
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we
 * just want the nearest entry that comes at or after the given offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
                   u64 offset, int bitmap_only, int fuzzy)
{
        struct rb_node *n = ctl->free_space_offset.rb_node;
        struct btrfs_free_space *entry, *prev = NULL;

        /* find entry that is closest to the 'offset' */
        while (1) {
                if (!n) {
                        entry = NULL;
                        break;
                }

                entry = rb_entry(n, struct btrfs_free_space, offset_index);
                prev = entry;

                if (offset < entry->offset)
                        n = n->rb_left;
                else if (offset > entry->offset)
                        n = n->rb_right;
                else
                        break;
        }

        if (bitmap_only) {
                if (!entry)
                        return NULL;
                if (entry->bitmap)
                        return entry;

                /*
                 * bitmap entry and extent entry may share same offset,
                 * in that case, bitmap entry comes after extent entry.
                 */
                n = rb_next(n);
                if (!n)
                        return NULL;
                entry = rb_entry(n, struct btrfs_free_space, offset_index);
                if (entry->offset != offset)
                        return NULL;

                WARN_ON(!entry->bitmap);
                return entry;
        } else if (entry) {
                if (entry->bitmap) {
                        /*
                         * if previous extent entry covers the offset,
                         * we should return it instead of the bitmap entry
                         */
                        n = rb_prev(&entry->offset_index);
                        if (n) {
                                prev = rb_entry(n, struct btrfs_free_space,
                                                offset_index);
                                if (!prev->bitmap &&
                                    prev->offset + prev->bytes > offset)
                                        entry = prev;
                        }
                }
                return entry;
        }

        if (!prev)
                return NULL;

        /* find last entry before the 'offset' */
        entry = prev;
        if (entry->offset > offset) {
                n = rb_prev(&entry->offset_index);
                if (n) {
                        entry = rb_entry(n, struct btrfs_free_space,
                                        offset_index);
                        BUG_ON(entry->offset > offset);
                } else {
                        if (fuzzy)
                                return entry;
                        else
                                return NULL;
                }
        }

        if (entry->bitmap) {
                n = rb_prev(&entry->offset_index);
                if (n) {
                        prev = rb_entry(n, struct btrfs_free_space,
                                        offset_index);
                        if (!prev->bitmap &&
                            prev->offset + prev->bytes > offset)
                                return prev;
                }
                if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
                        return entry;
        } else if (entry->offset + entry->bytes > offset)
                return entry;

        if (!fuzzy)
                return NULL;

        while (1) {
                if (entry->bitmap) {
                        if (entry->offset + BITS_PER_BITMAP *
                            ctl->unit > offset)
                                break;
                } else {
                        if (entry->offset + entry->bytes > offset)
                                break;
                }

                n = rb_next(&entry->offset_index);
                if (!n)
                        return NULL;
                entry = rb_entry(n, struct btrfs_free_space, offset_index);
        }
        return entry;
}

static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
                    struct btrfs_free_space *info)
{
        rb_erase(&info->offset_index, &ctl->free_space_offset);
        ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info)
{
        __unlink_free_space(ctl, info);
        ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info)
{
        int ret = 0;

        BUG_ON(!info->bitmap && !info->bytes);
        ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
                                 &info->offset_index, (info->bitmap != NULL));
        if (ret)
                return ret;

        ctl->free_space += info->bytes;
        ctl->free_extents++;
        return ret;
}

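/*
 * Recalculate how many extent entries we allow before switching new free
 * space over to bitmaps, keeping the total cache memory near
 * MAX_CACHE_BYTES_PER_GIG per GiB of block group.
 */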
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
        struct btrfs_block_group_cache *block_group = ctl->private;
        u64 max_bytes;
        u64 bitmap_bytes;
        u64 extent_bytes;
        u64 size = block_group->key.offset;
        u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
        int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

        BUG_ON(ctl->total_bitmaps > max_bitmaps);

        /*
         * The goal is to keep the total amount of memory used per 1gb of space
         * at or below 32k, so we need to adjust how much memory we allow to be
         * used by extent based free space tracking
         */
        if (size < 1024 * 1024 * 1024)
                max_bytes = MAX_CACHE_BYTES_PER_GIG;
        else
                max_bytes = MAX_CACHE_BYTES_PER_GIG *
                        div64_u64(size, 1024 * 1024 * 1024);

        /*
         * we want to account for 1 more bitmap than what we have so we can make
         * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
         * we add more bitmaps.
         */
        bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

        if (bitmap_bytes >= max_bytes) {
                ctl->extents_thresh = 0;
                return;
        }

        /*
         * we want the extent entry threshold to be at most 1/2 the max bytes
         * we can have, or whatever is left over after the bitmaps, whichever
         * is smaller.
         */
        extent_bytes = max_bytes - bitmap_bytes;
        extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

        ctl->extents_thresh =
                div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}

static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
                                       struct btrfs_free_space *info,
                                       u64 offset, u64 bytes)
{
        unsigned long start, count;

        start = offset_to_bit(info->offset, ctl->unit, offset);
        count = bytes_to_bits(bytes, ctl->unit);
        BUG_ON(start + count > BITS_PER_BITMAP);

        bitmap_clear(info->bitmap, start, count);

        info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info, u64 offset,
                              u64 bytes)
{
        __bitmap_clear_bits(ctl, info, offset, bytes);
        ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
                            struct btrfs_free_space *info, u64 offset,
                            u64 bytes)
{
        unsigned long start, count;

        start = offset_to_bit(info->offset, ctl->unit, offset);
        count = bytes_to_bits(bytes, ctl->unit);
        BUG_ON(start + count > BITS_PER_BITMAP);

        bitmap_set(info->bitmap, start, count);

        info->bytes += bytes;
        ctl->free_space += bytes;
}

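/*
 * Scan a bitmap entry for a run of set bits at least *bytes long, starting
 * at or after *offset.  On success, *offset and *bytes are updated to
 * describe the run that was found; returns -1 if no run is big enough.
 */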
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
                         struct btrfs_free_space *bitmap_info, u64 *offset,
                         u64 *bytes)
{
        unsigned long found_bits = 0;
        unsigned long bits, i;
        unsigned long next_zero;

        i = offset_to_bit(bitmap_info->offset, ctl->unit,
                          max_t(u64, *offset, bitmap_info->offset));
        bits = bytes_to_bits(*bytes, ctl->unit);

        for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
                next_zero = find_next_zero_bit(bitmap_info->bitmap,
                                               BITS_PER_BITMAP, i);
                if ((next_zero - i) >= bits) {
                        found_bits = next_zero - i;
                        break;
                }
                i = next_zero;
        }

        if (found_bits) {
                *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
                *bytes = (u64)(found_bits) * ctl->unit;
                return 0;
        }

        return -1;
}

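/*
 * Find the first entry (extent or bitmap) in the offset-sorted tree that
 * can satisfy an allocation of *bytes at or after *offset.
 */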
1465 static struct btrfs_free_space *
1466 find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
1467 {
1468         struct btrfs_free_space *entry;
1469         struct rb_node *node;
1470         int ret;
1471
1472         if (!ctl->free_space_offset.rb_node)
1473                 return NULL;
1474
1475         entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
1476         if (!entry)
1477                 return NULL;
1478
1479         for (node = &entry->offset_index; node; node = rb_next(node)) {
1480                 entry = rb_entry(node, struct btrfs_free_space, offset_index);
1481                 if (entry->bytes < *bytes)
1482                         continue;
1483
1484                 if (entry->bitmap) {
1485                         ret = search_bitmap(ctl, entry, offset, bytes);
1486                         if (!ret)
1487                                 return entry;
1488                         continue;
1489                 }
1490
1491                 *offset = entry->offset;
1492                 *bytes = entry->bytes;
1493                 return entry;
1494         }
1495
1496         return NULL;
1497 }
1498
1499 static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
1500                            struct btrfs_free_space *info, u64 offset)
1501 {
1502         info->offset = offset_to_bitmap(ctl, offset);
1503         info->bytes = 0;
1504         INIT_LIST_HEAD(&info->list);
1505         link_free_space(ctl, info);
1506         ctl->total_bitmaps++;
1507
1508         ctl->op->recalc_thresholds(ctl);
1509 }
1510
1511 static void free_bitmap(struct btrfs_free_space_ctl *ctl,
1512                         struct btrfs_free_space *bitmap_info)
1513 {
1514         unlink_free_space(ctl, bitmap_info);
1515         kfree(bitmap_info->bitmap);
1516         kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
1517         ctl->total_bitmaps--;
1518         ctl->op->recalc_thresholds(ctl);
1519 }
1520
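/*
 * Clear [*offset, *offset + *bytes) out of a bitmap entry, walking into
 * the following bitmaps when the range spans more than one.  Returns
 * -EAGAIN when the remainder isn't covered by the next bitmap so the
 * caller can retry from the top.
 */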
1521 static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
1522                               struct btrfs_free_space *bitmap_info,
1523                               u64 *offset, u64 *bytes)
1524 {
1525         u64 end;
1526         u64 search_start, search_bytes;
1527         int ret;
1528
1529 again:
1530         end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
1531
1532         /*
1533          * We need to search for bits in this bitmap.  We could only cover some
1534          * of the extent in this bitmap thanks to how we add space, so we need
1535          * to search for as much of it as we can and clear that amount, and then
1536          * go searching for the rest in the next bitmap.
1537          */
1538         search_start = *offset;
1539         search_bytes = ctl->unit;
1540         search_bytes = min(search_bytes, end - search_start + 1);
1541         ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
1542         BUG_ON(ret < 0 || search_start != *offset);
1543
1544         /* We may have found more bits than what we need */
1545         search_bytes = min(search_bytes, *bytes);
1546
1547         /* Cannot clear past the end of the bitmap */
1548         search_bytes = min(search_bytes, end - search_start + 1);
1549
1550         bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
1551         *offset += search_bytes;
1552         *bytes -= search_bytes;
1553
1554         if (*bytes) {
1555                 struct rb_node *next = rb_next(&bitmap_info->offset_index);
1556                 if (!bitmap_info->bytes)
1557                         free_bitmap(ctl, bitmap_info);
1558
1559                 /*
1560                  * no entry after this bitmap, but we still have bytes to
1561                  * remove, so something has gone wrong.
1562                  */
1563                 if (!next)
1564                         return -EINVAL;
1565
1566                 bitmap_info = rb_entry(next, struct btrfs_free_space,
1567                                        offset_index);
1568
1569                 /*
1570                  * if the next entry isn't a bitmap we need to return to let the
1571                  * extent stuff do its work.
1572                  */
1573                 if (!bitmap_info->bitmap)
1574                         return -EAGAIN;
1575
1576                 /*
1577                  * OK, the next item is a bitmap, but it may not actually cover
1578                  * the rest of the range we are removing, so search for it; if
1579                  * we don't find it, return so the caller can try everything
1580                  * over again.
1581                  */
1582                 search_start = *offset;
1583                 search_bytes = ctl->unit;
1584                 ret = search_bitmap(ctl, bitmap_info, &search_start,
1585                                     &search_bytes);
1586                 if (ret < 0 || search_start != *offset)
1587                         return -EAGAIN;
1588
1589                 goto again;
1590         } else if (!bitmap_info->bytes)
1591                 free_bitmap(ctl, bitmap_info);
1592
1593         return 0;
1594 }
1595
1596 static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
1597                                struct btrfs_free_space *info, u64 offset,
1598                                u64 bytes)
1599 {
1600         u64 bytes_to_set = 0;
1601         u64 end;
1602
1603         end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
1604
1605         bytes_to_set = min(end - offset, bytes);
1606
1607         bitmap_set_bits(ctl, info, offset, bytes_to_set);
1608
1609         return bytes_to_set;
1611 }
1612
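/*
 * Decide whether a new chunk of free space should be folded into a bitmap
 * or kept as a plain extent entry.
 */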
1613 static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
1614                       struct btrfs_free_space *info)
1615 {
1616         struct btrfs_block_group_cache *block_group = ctl->private;
1617
1618         /*
1619          * If we are below the extents threshold then we can add this as an
1620          * extent, and don't have to deal with the bitmap
1621          */
1622         if (ctl->free_extents < ctl->extents_thresh) {
1623                 /*
1624                  * If this block group has some small extents we don't want to
1625                  * use up all of our free slots in the cache with them, we want
1626                  * to reserve them for larger extents, however if we have plenty
1627                  * of cache left then go ahead and add them, no sense in adding
1628                  * the overhead of a bitmap if we don't have to.
1629                  */
1630                 if (info->bytes <= block_group->sectorsize * 4) {
1631                         if (ctl->free_extents * 2 <= ctl->extents_thresh)
1632                                 return false;
1633                 } else {
1634                         return false;
1635                 }
1636         }
1637
1638         /*
1639          * some block groups are so tiny they can't be enveloped by a bitmap, so
1640          * don't even bother to create a bitmap for this
1641          */
1642         if (BITS_PER_BITMAP * ctl->unit > block_group->key.offset)
1643                 return false;
1644
1645         return true;
1646 }
1647
1648 static struct btrfs_free_space_op free_space_op = {
1649         .recalc_thresholds      = recalculate_thresholds,
1650         .use_bitmap             = use_bitmap,
1651 };
1652
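/*
 * Feed as much of 'info' as possible into existing bitmaps (including one
 * linked into the active cluster), allocating a fresh bitmap when needed.
 * Returns 1 if the space was fully consumed, 0 if the caller should link
 * it as an extent entry instead, or a negative errno.
 */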
1653 static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
1654                               struct btrfs_free_space *info)
1655 {
1656         struct btrfs_free_space *bitmap_info;
1657         struct btrfs_block_group_cache *block_group = NULL;
1658         int added = 0;
1659         u64 bytes, offset, bytes_added;
1660         int ret;
1661
1662         bytes = info->bytes;
1663         offset = info->offset;
1664
1665         if (!ctl->op->use_bitmap(ctl, info))
1666                 return 0;
1667
1668         if (ctl->op == &free_space_op)
1669                 block_group = ctl->private;
1670 again:
1671         /*
1672          * Since we link bitmaps right into the cluster we need to see if we
1673          * have a cluster here, and if so and it has our bitmap we need to add
1674          * the free space to that bitmap.
1675          */
1676         if (block_group && !list_empty(&block_group->cluster_list)) {
1677                 struct btrfs_free_cluster *cluster;
1678                 struct rb_node *node;
1679                 struct btrfs_free_space *entry;
1680
1681                 cluster = list_entry(block_group->cluster_list.next,
1682                                      struct btrfs_free_cluster,
1683                                      block_group_list);
1684                 spin_lock(&cluster->lock);
1685                 node = rb_first(&cluster->root);
1686                 if (!node) {
1687                         spin_unlock(&cluster->lock);
1688                         goto no_cluster_bitmap;
1689                 }
1690
1691                 entry = rb_entry(node, struct btrfs_free_space, offset_index);
1692                 if (!entry->bitmap) {
1693                         spin_unlock(&cluster->lock);
1694                         goto no_cluster_bitmap;
1695                 }
1696
1697                 if (entry->offset == offset_to_bitmap(ctl, offset)) {
1698                         bytes_added = add_bytes_to_bitmap(ctl, entry,
1699                                                           offset, bytes);
1700                         bytes -= bytes_added;
1701                         offset += bytes_added;
1702                 }
1703                 spin_unlock(&cluster->lock);
1704                 if (!bytes) {
1705                         ret = 1;
1706                         goto out;
1707                 }
1708         }
1709
1710 no_cluster_bitmap:
1711         bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1712                                          1, 0);
1713         if (!bitmap_info) {
1714                 BUG_ON(added);
1715                 goto new_bitmap;
1716         }
1717
1718         bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
1719         bytes -= bytes_added;
1720         offset += bytes_added;
1721         added = 0;
1722
1723         if (!bytes) {
1724                 ret = 1;
1725                 goto out;
1726         } else
1727                 goto again;
1728
1729 new_bitmap:
1730         if (info && info->bitmap) {
1731                 add_new_bitmap(ctl, info, offset);
1732                 added = 1;
1733                 info = NULL;
1734                 goto again;
1735         } else {
1736                 spin_unlock(&ctl->tree_lock);
1737
1738                 /* no pre-allocated info, allocate a new one */
1739                 if (!info) {
1740                         info = kmem_cache_zalloc(btrfs_free_space_cachep,
1741                                                  GFP_NOFS);
1742                         if (!info) {
1743                                 spin_lock(&ctl->tree_lock);
1744                                 ret = -ENOMEM;
1745                                 goto out;
1746                         }
1747                 }
1748
1749                 /* allocate the bitmap */
1750                 info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
1751                 spin_lock(&ctl->tree_lock);
1752                 if (!info->bitmap) {
1753                         ret = -ENOMEM;
1754                         goto out;
1755                 }
1756                 goto again;
1757         }
1758
1759 out:
1760         if (info) {
1761                 if (info->bitmap)
1762                         kfree(info->bitmap);
1763                 kmem_cache_free(btrfs_free_space_cachep, info);
1764         }
1765
1766         return ret;
1767 }
1768
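/*
 * 'update_stat' chooses unlink_free_space(), which also adjusts
 * ctl->free_space, over the bare __unlink_free_space(); the cluster code
 * passes false because the space never left the free space accounting
 * while it sat in the cluster.
 */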
1769 static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
1770                           struct btrfs_free_space *info, bool update_stat)
1771 {
1772         struct btrfs_free_space *left_info;
1773         struct btrfs_free_space *right_info;
1774         bool merged = false;
1775         u64 offset = info->offset;
1776         u64 bytes = info->bytes;
1777
1778         /*
1779          * first we want to see if there is free space adjacent to the range we
1780          * are adding; if there is, remove that entry and fold its space into
1781          * this one so it covers the entire range
1782          */
1783         right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
1784         if (right_info && rb_prev(&right_info->offset_index))
1785                 left_info = rb_entry(rb_prev(&right_info->offset_index),
1786                                      struct btrfs_free_space, offset_index);
1787         else
1788                 left_info = tree_search_offset(ctl, offset - 1, 0, 0);
1789
1790         if (right_info && !right_info->bitmap) {
1791                 if (update_stat)
1792                         unlink_free_space(ctl, right_info);
1793                 else
1794                         __unlink_free_space(ctl, right_info);
1795                 info->bytes += right_info->bytes;
1796                 kmem_cache_free(btrfs_free_space_cachep, right_info);
1797                 merged = true;
1798         }
1799
1800         if (left_info && !left_info->bitmap &&
1801             left_info->offset + left_info->bytes == offset) {
1802                 if (update_stat)
1803                         unlink_free_space(ctl, left_info);
1804                 else
1805                         __unlink_free_space(ctl, left_info);
1806                 info->offset = left_info->offset;
1807                 info->bytes += left_info->bytes;
1808                 kmem_cache_free(btrfs_free_space_cachep, left_info);
1809                 merged = true;
1810         }
1811
1812         return merged;
1813 }
1814
1815 int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
1816                            u64 offset, u64 bytes)
1817 {
1818         struct btrfs_free_space *info;
1819         int ret = 0;
1820
1821         info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
1822         if (!info)
1823                 return -ENOMEM;
1824
1825         info->offset = offset;
1826         info->bytes = bytes;
1827
1828         spin_lock(&ctl->tree_lock);
1829
1830         if (try_merge_free_space(ctl, info, true))
1831                 goto link;
1832
1833         /*
1834          * There was no extent directly to the left or right of this new
1835          * extent, so we know we're going to have to add a new entry; before
1836          * we do that, see if we need to drop this into a bitmap
1837          */
1838         ret = insert_into_bitmap(ctl, info);
1839         if (ret < 0) {
1840                 goto out;
1841         } else if (ret) {
1842                 ret = 0;
1843                 goto out;
1844         }
1845 link:
1846         ret = link_free_space(ctl, info);
1847         if (ret)
1848                 kmem_cache_free(btrfs_free_space_cachep, info);
1849 out:
1850         spin_unlock(&ctl->tree_lock);
1851
1852         if (ret) {
1853                 printk(KERN_CRIT "btrfs: unable to add free space: %d\n", ret);
1854                 BUG_ON(ret == -EEXIST);
1855         }
1856
1857         return ret;
1858 }
1859
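/*
 * Remove [offset, offset + bytes) from the cache, splitting extent entries
 * and clearing bitmap bits as needed; the range may span several entries,
 * hence the 'again' loop.
 */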
1860 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
1861                             u64 offset, u64 bytes)
1862 {
1863         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1864         struct btrfs_free_space *info;
1865         int ret = 0;
1866
1867         spin_lock(&ctl->tree_lock);
1868
1869 again:
1870         if (!bytes)
1871                 goto out_lock;
1872
1873         info = tree_search_offset(ctl, offset, 0, 0);
1874         if (!info) {
1875                 /*
1876                  * we didn't find an extent that matched the space we wanted
1877                  * to remove; look for a bitmap instead
1878                  */
1879                 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1880                                           1, 0);
1881                 if (!info) {
1882                         /* the tree logging code might be calling us before we
1883                          * have fully loaded the free space rbtree for this
1884                          * block group.  So it is possible the entry won't
1885                          * be in the rbtree yet at all.  The caching code
1886                          * will make sure not to put it in the rbtree if
1887                          * the logging code has pinned it.
1888                          */
1889                         goto out_lock;
1890                 }
1891         }
1892
1893         if (!info->bitmap) {
1894                 unlink_free_space(ctl, info);
1895                 if (offset == info->offset) {
1896                         u64 to_free = min(bytes, info->bytes);
1897
1898                         info->bytes -= to_free;
1899                         info->offset += to_free;
1900                         if (info->bytes) {
1901                                 ret = link_free_space(ctl, info);
1902                                 WARN_ON(ret);
1903                         } else {
1904                                 kmem_cache_free(btrfs_free_space_cachep, info);
1905                         }
1906
1907                         offset += to_free;
1908                         bytes -= to_free;
1909                         goto again;
1910                 } else {
1911                         u64 old_end = info->bytes + info->offset;
1912
1913                         info->bytes = offset - info->offset;
1914                         ret = link_free_space(ctl, info);
1915                         WARN_ON(ret);
1916                         if (ret)
1917                                 goto out_lock;
1918
1919                         /* Not enough bytes in this entry to satisfy us */
1920                         if (old_end < offset + bytes) {
1921                                 bytes -= old_end - offset;
1922                                 offset = old_end;
1923                                 goto again;
1924                         } else if (old_end == offset + bytes) {
1925                                 /* all done */
1926                                 goto out_lock;
1927                         }
1928                         spin_unlock(&ctl->tree_lock);
1929
1930                         ret = btrfs_add_free_space(block_group, offset + bytes,
1931                                                    old_end - (offset + bytes));
1932                         WARN_ON(ret);
1933                         goto out;
1934                 }
1935         }
1936
1937         ret = remove_from_bitmap(ctl, info, &offset, &bytes);
1938         if (ret == -EAGAIN)
1939                 goto again;
1940         BUG_ON(ret); /* logic error */
1941 out_lock:
1942         spin_unlock(&ctl->tree_lock);
1943 out:
1944         return ret;
1945 }
1946
1947 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
1948                            u64 bytes)
1949 {
1950         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1951         struct btrfs_free_space *info;
1952         struct rb_node *n;
1953         int count = 0;
1954
1955         for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
1956                 info = rb_entry(n, struct btrfs_free_space, offset_index);
1957                 if (info->bytes >= bytes && !block_group->ro)
1958                         count++;
1959                 printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
1960                        (unsigned long long)info->offset,
1961                        (unsigned long long)info->bytes,
1962                        (info->bitmap) ? "yes" : "no");
1963         }
1964         printk(KERN_INFO "block group has cluster?: %s\n",
1965                list_empty(&block_group->cluster_list) ? "no" : "yes");
1966         printk(KERN_INFO "%d blocks of free space at or bigger than %llu bytes\n",
1967                count, (unsigned long long)bytes);
1968 }
1969
1970 void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
1971 {
1972         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
1973
1974         spin_lock_init(&ctl->tree_lock);
1975         ctl->unit = block_group->sectorsize;
1976         ctl->start = block_group->key.objectid;
1977         ctl->private = block_group;
1978         ctl->op = &free_space_op;
1979
1980         /*
1981          * we only want to have 32k of ram per block group for keeping
1982          * track of free space, and if we pass 1/2 of that we want to
1983          * start converting things over to using bitmaps
1984          */
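        /*
         * e.g. (32768 / 2) / sizeof(struct btrfs_free_space): with entries
         * around 64 bytes on 64-bit this is roughly 256 extent entries.
         */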
1985         ctl->extents_thresh = ((1024 * 32) / 2) /
1986                                 sizeof(struct btrfs_free_space);
1987 }
1988
1989 /*
1990  * for a given cluster, put all of its extents back into the free
1991  * space cache.  If the block group passed doesn't match the block group
1992  * pointed to by the cluster, someone else raced in and freed the
1993  * cluster already.  In that case, we just return without changing anything
1994  */
1995 static int
1996 __btrfs_return_cluster_to_free_space(
1997                              struct btrfs_block_group_cache *block_group,
1998                              struct btrfs_free_cluster *cluster)
1999 {
2000         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2001         struct btrfs_free_space *entry;
2002         struct rb_node *node;
2003
2004         spin_lock(&cluster->lock);
2005         if (cluster->block_group != block_group)
2006                 goto out;
2007
2008         cluster->block_group = NULL;
2009         cluster->window_start = 0;
2010         list_del_init(&cluster->block_group_list);
2011
2012         node = rb_first(&cluster->root);
2013         while (node) {
2014                 bool bitmap;
2015
2016                 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2017                 node = rb_next(&entry->offset_index);
2018                 rb_erase(&entry->offset_index, &cluster->root);
2019
2020                 bitmap = (entry->bitmap != NULL);
2021                 if (!bitmap)
2022                         try_merge_free_space(ctl, entry, false);
2023                 tree_insert_offset(&ctl->free_space_offset,
2024                                    entry->offset, &entry->offset_index, bitmap);
2025         }
2026         cluster->root = RB_ROOT;
2027
2028 out:
2029         spin_unlock(&cluster->lock);
2030         btrfs_put_block_group(block_group);
2031         return 0;
2032 }
2033
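/*
 * Drop every entry in the cache.  Called with tree_lock held; the lock is
 * dropped and retaken around cond_resched() so huge caches don't hog the
 * CPU.
 */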
2034 void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
2035 {
2036         struct btrfs_free_space *info;
2037         struct rb_node *node;
2038
2039         while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
2040                 info = rb_entry(node, struct btrfs_free_space, offset_index);
2041                 if (!info->bitmap) {
2042                         unlink_free_space(ctl, info);
2043                         kmem_cache_free(btrfs_free_space_cachep, info);
2044                 } else {
2045                         free_bitmap(ctl, info);
2046                 }
2047                 if (need_resched()) {
2048                         spin_unlock(&ctl->tree_lock);
2049                         cond_resched();
2050                         spin_lock(&ctl->tree_lock);
2051                 }
2052         }
2053 }
2054
2055 void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
2056 {
2057         spin_lock(&ctl->tree_lock);
2058         __btrfs_remove_free_space_cache_locked(ctl);
2059         spin_unlock(&ctl->tree_lock);
2060 }
2061
2062 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
2063 {
2064         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2065         struct btrfs_free_cluster *cluster;
2066         struct list_head *head;
2067
2068         spin_lock(&ctl->tree_lock);
2069         while ((head = block_group->cluster_list.next) !=
2070                &block_group->cluster_list) {
2071                 cluster = list_entry(head, struct btrfs_free_cluster,
2072                                      block_group_list);
2073
2074                 WARN_ON(cluster->block_group != block_group);
2075                 __btrfs_return_cluster_to_free_space(block_group, cluster);
2076                 if (need_resched()) {
2077                         spin_unlock(&ctl->tree_lock);
2078                         cond_resched();
2079                         spin_lock(&ctl->tree_lock);
2080                 }
2081         }
2082         __btrfs_remove_free_space_cache_locked(ctl);
2083         spin_unlock(&ctl->tree_lock);
2085 }
2086
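/*
 * Take 'bytes' out of the cache for an allocation; the search requires at
 * least bytes + empty_size to be free, but only 'bytes' is consumed.
 * Returns the logical offset of the allocation, or 0 on failure.
 */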
2087 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
2088                                u64 offset, u64 bytes, u64 empty_size)
2089 {
2090         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2091         struct btrfs_free_space *entry = NULL;
2092         u64 bytes_search = bytes + empty_size;
2093         u64 ret = 0;
2094
2095         spin_lock(&ctl->tree_lock);
2096         entry = find_free_space(ctl, &offset, &bytes_search);
2097         if (!entry)
2098                 goto out;
2099
2100         ret = offset;
2101         if (entry->bitmap) {
2102                 bitmap_clear_bits(ctl, entry, offset, bytes);
2103                 if (!entry->bytes)
2104                         free_bitmap(ctl, entry);
2105         } else {
2106                 unlink_free_space(ctl, entry);
2107                 entry->offset += bytes;
2108                 entry->bytes -= bytes;
2109                 if (!entry->bytes)
2110                         kmem_cache_free(btrfs_free_space_cachep, entry);
2111                 else
2112                         link_free_space(ctl, entry);
2113         }
2114
2115 out:
2116         spin_unlock(&ctl->tree_lock);
2117
2118         return ret;
2119 }
2120
2121 /*
2122  * given a cluster, put all of its extents back into the free space
2123  * cache.  If a block group is passed, this function will only free
2124  * a cluster that belongs to the passed block group.
2125  *
2126  * Otherwise, it'll get a reference on the block group pointed to by the
2127  * cluster and remove the cluster from it.
2128  */
2129 int btrfs_return_cluster_to_free_space(
2130                                struct btrfs_block_group_cache *block_group,
2131                                struct btrfs_free_cluster *cluster)
2132 {
2133         struct btrfs_free_space_ctl *ctl;
2134         int ret;
2135
2136         /* first, get a safe pointer to the block group */
2137         spin_lock(&cluster->lock);
2138         if (!block_group) {
2139                 block_group = cluster->block_group;
2140                 if (!block_group) {
2141                         spin_unlock(&cluster->lock);
2142                         return 0;
2143                 }
2144         } else if (cluster->block_group != block_group) {
2145                 /* someone else has already freed it, don't redo their work */
2146                 spin_unlock(&cluster->lock);
2147                 return 0;
2148         }
2149         atomic_inc(&block_group->count);
2150         spin_unlock(&cluster->lock);
2151
2152         ctl = block_group->free_space_ctl;
2153
2154         /* now return any extents the cluster had on it */
2155         spin_lock(&ctl->tree_lock);
2156         ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
2157         spin_unlock(&ctl->tree_lock);
2158
2159         /* finally drop our ref */
2160         btrfs_put_block_group(block_group);
2161         return ret;
2162 }
2163
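/*
 * Allocate from a bitmap entry that is linked into a cluster.  Called with
 * cluster->lock held; the caller fixes up ctl->free_space afterwards.
 */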
2164 static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
2165                                    struct btrfs_free_cluster *cluster,
2166                                    struct btrfs_free_space *entry,
2167                                    u64 bytes, u64 min_start)
2168 {
2169         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2170         int err;
2171         u64 search_start = min_start;
2172         u64 search_bytes = bytes;
2173         u64 ret = 0;
2177
2178         err = search_bitmap(ctl, entry, &search_start, &search_bytes);
2179         if (err)
2180                 return 0;
2181
2182         ret = search_start;
2183         __bitmap_clear_bits(ctl, entry, ret, bytes);
2184
2185         return ret;
2186 }
2187
2188 /*
2189  * given a cluster, try to allocate 'bytes' from it, returns 0
2190  * if it couldn't find anything suitably large, or a logical disk offset
2191  * if things worked out
2192  */
2193 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
2194                              struct btrfs_free_cluster *cluster, u64 bytes,
2195                              u64 min_start)
2196 {
2197         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2198         struct btrfs_free_space *entry = NULL;
2199         struct rb_node *node;
2200         u64 ret = 0;
2201
2202         spin_lock(&cluster->lock);
2203         if (bytes > cluster->max_size)
2204                 goto out;
2205
2206         if (cluster->block_group != block_group)
2207                 goto out;
2208
2209         node = rb_first(&cluster->root);
2210         if (!node)
2211                 goto out;
2212
2213         entry = rb_entry(node, struct btrfs_free_space, offset_index);
2214         while (1) {
2215                 if (entry->bytes < bytes ||
2216                     (!entry->bitmap && entry->offset < min_start)) {
2217                         node = rb_next(&entry->offset_index);
2218                         if (!node)
2219                                 break;
2220                         entry = rb_entry(node, struct btrfs_free_space,
2221                                          offset_index);
2222                         continue;
2223                 }
2224
2225                 if (entry->bitmap) {
2226                         ret = btrfs_alloc_from_bitmap(block_group,
2227                                                       cluster, entry, bytes,
2228                                                       cluster->window_start);
2229                         if (ret == 0) {
2230                                 node = rb_next(&entry->offset_index);
2231                                 if (!node)
2232                                         break;
2233                                 entry = rb_entry(node, struct btrfs_free_space,
2234                                                  offset_index);
2235                                 continue;
2236                         }
2237                         cluster->window_start += bytes;
2238                 } else {
2239                         ret = entry->offset;
2240
2241                         entry->offset += bytes;
2242                         entry->bytes -= bytes;
2243                 }
2244
2245                 if (entry->bytes == 0)
2246                         rb_erase(&entry->offset_index, &cluster->root);
2247                 break;
2248         }
2249 out:
2250         spin_unlock(&cluster->lock);
2251
2252         if (!ret)
2253                 return 0;
2254
2255         spin_lock(&ctl->tree_lock);
2256
2257         ctl->free_space -= bytes;
2258         if (entry->bytes == 0) {
2259                 ctl->free_extents--;
2260                 if (entry->bitmap) {
2261                         kfree(entry->bitmap);
2262                         ctl->total_bitmaps--;
2263                         ctl->op->recalc_thresholds(ctl);
2264                 }
2265                 kmem_cache_free(btrfs_free_space_cachep, entry);
2266         }
2267
2268         spin_unlock(&ctl->tree_lock);
2269
2270         return ret;
2271 }
2272
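/*
 * Try to build the cluster window out of a single bitmap entry: gather at
 * least 'bytes' worth of set bits, with one run of at least cont1_bytes,
 * then move the bitmap from the free space tree into the cluster rbtree.
 */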
2273 static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
2274                                 struct btrfs_free_space *entry,
2275                                 struct btrfs_free_cluster *cluster,
2276                                 u64 offset, u64 bytes,
2277                                 u64 cont1_bytes, u64 min_bytes)
2278 {
2279         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2280         unsigned long next_zero;
2281         unsigned long i;
2282         unsigned long want_bits;
2283         unsigned long min_bits;
2284         unsigned long found_bits;
2285         unsigned long start = 0;
2286         unsigned long total_found = 0;
2287         int ret;
2288
2289         i = offset_to_bit(entry->offset, ctl->unit,
2290                           max_t(u64, offset, entry->offset));
2291         want_bits = bytes_to_bits(bytes, ctl->unit);
2292         min_bits = bytes_to_bits(min_bytes, ctl->unit);
2293
2294 again:
2295         found_bits = 0;
2296         for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
2297                 next_zero = find_next_zero_bit(entry->bitmap,
2298                                                BITS_PER_BITMAP, i);
2299                 if (next_zero - i >= min_bits) {
2300                         found_bits = next_zero - i;
2301                         break;
2302                 }
2303                 i = next_zero;
2304         }
2305
2306         if (!found_bits)
2307                 return -ENOSPC;
2308
2309         if (!total_found) {
2310                 start = i;
2311                 cluster->max_size = 0;
2312         }
2313
2314         total_found += found_bits;
2315
2316         if (cluster->max_size < found_bits * ctl->unit)
2317                 cluster->max_size = found_bits * ctl->unit;
2318
2319         if (total_found < want_bits || cluster->max_size < cont1_bytes) {
2320                 i = next_zero + 1;
2321                 goto again;
2322         }
2323
2324         cluster->window_start = start * ctl->unit + entry->offset;
2325         rb_erase(&entry->offset_index, &ctl->free_space_offset);
2326         ret = tree_insert_offset(&cluster->root, entry->offset,
2327                                  &entry->offset_index, 1);
2328         BUG_ON(ret); /* -EEXIST; Logic error */
2329
2330         trace_btrfs_setup_cluster(block_group, cluster,
2331                                   total_found * ctl->unit, 1);
2332         return 0;
2333 }
2334
2335 /*
2336  * This searches the block group for just extents to fill the cluster with.
2337  * Try to find a cluster with at least bytes total bytes, at least one
2338  * extent of cont1_bytes, and other extents of at least min_bytes.
2339  */
2340 static noinline int
2341 setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2342                         struct btrfs_free_cluster *cluster,
2343                         struct list_head *bitmaps, u64 offset, u64 bytes,
2344                         u64 cont1_bytes, u64 min_bytes)
2345 {
2346         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2347         struct btrfs_free_space *first = NULL;
2348         struct btrfs_free_space *entry = NULL;
2349         struct btrfs_free_space *last;
2350         struct rb_node *node;
2351         u64 window_start;
2352         u64 window_free;
2353         u64 max_extent;
2354         u64 total_size = 0;
2355
2356         entry = tree_search_offset(ctl, offset, 0, 1);
2357         if (!entry)
2358                 return -ENOSPC;
2359
2360         /*
2361          * We don't want bitmaps, so just move along until we find a normal
2362          * extent entry.
2363          */
2364         while (entry->bitmap || entry->bytes < min_bytes) {
2365                 if (entry->bitmap && list_empty(&entry->list))
2366                         list_add_tail(&entry->list, bitmaps);
2367                 node = rb_next(&entry->offset_index);
2368                 if (!node)
2369                         return -ENOSPC;
2370                 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2371         }
2372
2373         window_start = entry->offset;
2374         window_free = entry->bytes;
2375         max_extent = entry->bytes;
2376         first = entry;
2377         last = entry;
2378
2379         for (node = rb_next(&entry->offset_index); node;
2380              node = rb_next(&entry->offset_index)) {
2381                 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2382
2383                 if (entry->bitmap) {
2384                         if (list_empty(&entry->list))
2385                                 list_add_tail(&entry->list, bitmaps);
2386                         continue;
2387                 }
2388
2389                 if (entry->bytes < min_bytes)
2390                         continue;
2391
2392                 last = entry;
2393                 window_free += entry->bytes;
2394                 if (entry->bytes > max_extent)
2395                         max_extent = entry->bytes;
2396         }
2397
2398         if (window_free < bytes || max_extent < cont1_bytes)
2399                 return -ENOSPC;
2400
2401         cluster->window_start = first->offset;
2402
2403         node = &first->offset_index;
2404
2405         /*
2406          * now we've found our entries, pull them out of the free space
2407          * cache and put them into the cluster rbtree
2408          */
2409         do {
2410                 int ret;
2411
2412                 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2413                 node = rb_next(&entry->offset_index);
2414                 if (entry->bitmap || entry->bytes < min_bytes)
2415                         continue;
2416
2417                 rb_erase(&entry->offset_index, &ctl->free_space_offset);
2418                 ret = tree_insert_offset(&cluster->root, entry->offset,
2419                                          &entry->offset_index, 0);
2420                 total_size += entry->bytes;
2421                 BUG_ON(ret); /* -EEXIST; Logic error */
2422         } while (node && entry != last);
2423
2424         cluster->max_size = max_extent;
2425         trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
2426         return 0;
2427 }
2428
2429 /*
2430  * This specifically looks for bitmaps that may work in the cluster, we assume
2431  * that we have already failed to find extents that will work.
2432  */
2433 static noinline int
2434 setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2435                      struct btrfs_free_cluster *cluster,
2436                      struct list_head *bitmaps, u64 offset, u64 bytes,
2437                      u64 cont1_bytes, u64 min_bytes)
2438 {
2439         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2440         struct btrfs_free_space *entry;
2441         int ret = -ENOSPC;
2442         u64 bitmap_offset = offset_to_bitmap(ctl, offset);
2443
2444         if (ctl->total_bitmaps == 0)
2445                 return -ENOSPC;
2446
2447         /*
2448          * The bitmap that covers offset won't be in the list unless offset
2449          * is just its start offset.
2450          */
2451         entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
2452         if (entry->offset != bitmap_offset) {
2453                 entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
2454                 if (entry && list_empty(&entry->list))
2455                         list_add(&entry->list, bitmaps);
2456         }
2457
2458         list_for_each_entry(entry, bitmaps, list) {
2459                 if (entry->bytes < bytes)
2460                         continue;
2461                 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
2462                                            bytes, cont1_bytes, min_bytes);
2463                 if (!ret)
2464                         return 0;
2465         }
2466
2467         /*
2468          * The bitmaps list has all the bitmaps that record free space
2469          * starting after offset, so no more search is required.
2470          */
2471         return -ENOSPC;
2472 }
2473
2474 /*
2475  * here we try to find a cluster of blocks in a block group.  The goal
2476  * is to find at least bytes+empty_size.
2477  * We might not find them all in one contiguous area.
2478  *
2479  * returns zero and sets up cluster if things worked out, otherwise
2480  * it returns -ENOSPC
2481  */
2482 int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2483                              struct btrfs_root *root,
2484                              struct btrfs_block_group_cache *block_group,
2485                              struct btrfs_free_cluster *cluster,
2486                              u64 offset, u64 bytes, u64 empty_size)
2487 {
2488         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2489         struct btrfs_free_space *entry, *tmp;
2490         LIST_HEAD(bitmaps);
2491         u64 min_bytes;
2492         u64 cont1_bytes;
2493         int ret;
2494
2495         /*
2496          * Choose the minimum extent size we'll require for this
2497          * cluster.  For SSD_SPREAD, don't allow any fragmentation.
2498          * For metadata, allow allocations with smaller extents.  For
2499          * data, keep it dense.
2500          */
2501         if (btrfs_test_opt(root, SSD_SPREAD)) {
2502                 cont1_bytes = min_bytes = bytes + empty_size;
2503         } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
2504                 cont1_bytes = bytes;
2505                 min_bytes = block_group->sectorsize;
2506         } else {
2507                 cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
2508                 min_bytes = block_group->sectorsize;
2509         }
2510
2511         spin_lock(&ctl->tree_lock);
2512
2513         /*
2514          * If we know we don't have enough space to make a cluster don't even
2515          * bother doing all the work to try and find one.
2516          */
2517         if (ctl->free_space < bytes) {
2518                 spin_unlock(&ctl->tree_lock);
2519                 return -ENOSPC;
2520         }
2521
2522         spin_lock(&cluster->lock);
2523
2524         /* someone already found a cluster, hooray */
2525         if (cluster->block_group) {
2526                 ret = 0;
2527                 goto out;
2528         }
2529
2530         trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
2531                                  min_bytes);
2532
2533         INIT_LIST_HEAD(&bitmaps);
2534         ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
2535                                       bytes + empty_size,
2536                                       cont1_bytes, min_bytes);
2537         if (ret)
2538                 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
2539                                            offset, bytes + empty_size,
2540                                            cont1_bytes, min_bytes);
2541
2542         /* Clear our temporary list */
2543         list_for_each_entry_safe(entry, tmp, &bitmaps, list)
2544                 list_del_init(&entry->list);
2545
2546         if (!ret) {
2547                 atomic_inc(&block_group->count);
2548                 list_add_tail(&cluster->block_group_list,
2549                               &block_group->cluster_list);
2550                 cluster->block_group = block_group;
2551         } else {
2552                 trace_btrfs_failed_cluster_setup(block_group);
2553         }
2554 out:
2555         spin_unlock(&cluster->lock);
2556         spin_unlock(&ctl->tree_lock);
2557
2558         return ret;
2559 }
2560
2561 /*
2562  * simple code to zero out a cluster
2563  */
2564 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
2565 {
2566         spin_lock_init(&cluster->lock);
2567         spin_lock_init(&cluster->refill_lock);
2568         cluster->root = RB_ROOT;
2569         cluster->max_size = 0;
2570         INIT_LIST_HEAD(&cluster->block_group_list);
2571         cluster->block_group = NULL;
2572 }
2573
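/*
 * Discard [start, start + bytes) and give reserved_start/reserved_bytes
 * back to the free space cache.  The range is accounted as reserved while
 * the discard runs so the space counters stay consistent.
 */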
2574 static int do_trimming(struct btrfs_block_group_cache *block_group,
2575                        u64 *total_trimmed, u64 start, u64 bytes,
2576                        u64 reserved_start, u64 reserved_bytes)
2577 {
2578         struct btrfs_space_info *space_info = block_group->space_info;
2579         struct btrfs_fs_info *fs_info = block_group->fs_info;
2580         int ret;
2581         int update = 0;
2582         u64 trimmed = 0;
2583
2584         spin_lock(&space_info->lock);
2585         spin_lock(&block_group->lock);
2586         if (!block_group->ro) {
2587                 block_group->reserved += reserved_bytes;
2588                 space_info->bytes_reserved += reserved_bytes;
2589                 update = 1;
2590         }
2591         spin_unlock(&block_group->lock);
2592         spin_unlock(&space_info->lock);
2593
2594         ret = btrfs_error_discard_extent(fs_info->extent_root,
2595                                          start, bytes, &trimmed);
2596         if (!ret)
2597                 *total_trimmed += trimmed;
2598
2599         btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
2600
2601         if (update) {
2602                 spin_lock(&space_info->lock);
2603                 spin_lock(&block_group->lock);
2604                 if (block_group->ro)
2605                         space_info->bytes_readonly += reserved_bytes;
2606                 block_group->reserved -= reserved_bytes;
2607                 space_info->bytes_reserved -= reserved_bytes;
2608                 spin_unlock(&space_info->lock);
2609                 spin_unlock(&block_group->lock);
2610         }
2611
2612         return ret;
2613 }
2614
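/*
 * Trim the extent entries overlapping [start, end): each entry is pulled
 * out of the cache, discarded, and re-added by do_trimming().
 */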
2615 static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
2616                           u64 *total_trimmed, u64 start, u64 end, u64 minlen)
2617 {
2618         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2619         struct btrfs_free_space *entry;
2620         struct rb_node *node;
2621         int ret = 0;
2622         u64 extent_start;
2623         u64 extent_bytes;
2624         u64 bytes;
2625
2626         while (start < end) {
2627                 spin_lock(&ctl->tree_lock);
2628
2629                 if (ctl->free_space < minlen) {
2630                         spin_unlock(&ctl->tree_lock);
2631                         break;
2632                 }
2633
2634                 entry = tree_search_offset(ctl, start, 0, 1);
2635                 if (!entry) {
2636                         spin_unlock(&ctl->tree_lock);
2637                         break;
2638                 }
2639
2640                 /* skip bitmaps */
2641                 while (entry->bitmap) {
2642                         node = rb_next(&entry->offset_index);
2643                         if (!node) {
2644                                 spin_unlock(&ctl->tree_lock);
2645                                 goto out;
2646                         }
2647                         entry = rb_entry(node, struct btrfs_free_space,
2648                                          offset_index);
2649                 }
2650
2651                 if (entry->offset >= end) {
2652                         spin_unlock(&ctl->tree_lock);
2653                         break;
2654                 }
2655
2656                 extent_start = entry->offset;
2657                 extent_bytes = entry->bytes;
2658                 start = max(start, extent_start);
2659                 bytes = min(extent_start + extent_bytes, end) - start;
2660                 if (bytes < minlen) {
2661                         spin_unlock(&ctl->tree_lock);
2662                         goto next;
2663                 }
2664
2665                 unlink_free_space(ctl, entry);
2666                 kmem_cache_free(btrfs_free_space_cachep, entry);
2667
2668                 spin_unlock(&ctl->tree_lock);
2669
2670                 ret = do_trimming(block_group, total_trimmed, start, bytes,
2671                                   extent_start, extent_bytes);
2672                 if (ret)
2673                         break;
2674 next:
2675                 start += bytes;
2676
2677                 if (fatal_signal_pending(current)) {
2678                         ret = -ERESTARTSYS;
2679                         break;
2680                 }
2681
2682                 cond_resched();
2683         }
2684 out:
2685         return ret;
2686 }
2687
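/*
 * As above, but for bitmap entries: walk one bitmap-sized window at a
 * time and discard each run of at least 'minlen' bytes.
 */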
2688 static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
2689                         u64 *total_trimmed, u64 start, u64 end, u64 minlen)
2690 {
2691         struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2692         struct btrfs_free_space *entry;
2693         int ret = 0;
2694         int ret2;
2695         u64 bytes;
2696         u64 offset = offset_to_bitmap(ctl, start);
2697
2698         while (offset < end) {
2699                 bool next_bitmap = false;
2700
2701                 spin_lock(&ctl->tree_lock);
2702
2703                 if (ctl->free_space < minlen) {
2704                         spin_unlock(&ctl->tree_lock);
2705                         break;
2706                 }
2707
2708                 entry = tree_search_offset(ctl, offset, 1, 0);
2709                 if (!entry) {
2710                         spin_unlock(&ctl->tree_lock);
2711                         next_bitmap = true;
2712                         goto next;
2713                 }
2714
2715                 bytes = minlen;
2716                 ret2 = search_bitmap(ctl, entry, &start, &bytes);
2717                 if (ret2 || start >= end) {
2718                         spin_unlock(&ctl->tree_lock);
2719                         next_bitmap = true;
2720                         goto next;
2721                 }
2722
2723                 bytes = min(bytes, end - start);
2724                 if (bytes < minlen) {
2725                         spin_unlock(&ctl->tree_lock);
2726                         goto next;
2727                 }
2728
2729                 bitmap_clear_bits(ctl, entry, start, bytes);
2730                 if (entry->bytes == 0)
2731                         free_bitmap(ctl, entry);
2732
2733                 spin_unlock(&ctl->tree_lock);
2734
2735                 ret = do_trimming(block_group, total_trimmed, start, bytes,
2736                                   start, bytes);
2737                 if (ret)
2738                         break;
2739 next:
2740                 if (next_bitmap) {
2741                         offset += BITS_PER_BITMAP * ctl->unit;
2742                 } else {
2743                         start += bytes;
2744                         if (start >= offset + BITS_PER_BITMAP * ctl->unit)
2745                                 offset += BITS_PER_BITMAP * ctl->unit;
2746                 }
2747
2748                 if (fatal_signal_pending(current)) {
2749                         ret = -ERESTARTSYS;
2750                         break;
2751                 }
2752
2753                 cond_resched();
2754         }
2755
2756         return ret;
2757 }
2758
2759 int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
2760                            u64 *trimmed, u64 start, u64 end, u64 minlen)
2761 {
2762         int ret;
2763
2764         *trimmed = 0;
2765
2766         ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
2767         if (ret)
2768                 return ret;
2769
2770         ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
2771
2772         return ret;
2773 }
2774
2775 /*
2776  * Find the left-most item in the cache tree, and then return the
2777  * smallest inode number in the item.
2778  *
2779  * Note: the returned inode number may not be the smallest one in
2780  * the tree, if the left-most item is a bitmap.
2781  */
2782 u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
2783 {
2784         struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
2785         struct btrfs_free_space *entry = NULL;
2786         u64 ino = 0;
2787
2788         spin_lock(&ctl->tree_lock);
2789
2790         if (RB_EMPTY_ROOT(&ctl->free_space_offset))
2791                 goto out;
2792
2793         entry = rb_entry(rb_first(&ctl->free_space_offset),
2794                          struct btrfs_free_space, offset_index);
2795
2796         if (!entry->bitmap) {
2797                 ino = entry->offset;
2798
2799                 unlink_free_space(ctl, entry);
2800                 entry->offset++;
2801                 entry->bytes--;
2802                 if (!entry->bytes)
2803                         kmem_cache_free(btrfs_free_space_cachep, entry);
2804                 else
2805                         link_free_space(ctl, entry);
2806         } else {
2807                 u64 offset = 0;
2808                 u64 count = 1;
2809                 int ret;
2810
2811                 ret = search_bitmap(ctl, entry, &offset, &count);
2812                 /* Logic error; Should be empty if it can't find anything */
2813                 BUG_ON(ret);
2814
2815                 ino = offset;
2816                 bitmap_clear_bits(ctl, entry, offset, 1);
2817                 if (entry->bytes == 0)
2818                         free_bitmap(ctl, entry);
2819         }
2820 out:
2821         spin_unlock(&ctl->tree_lock);
2822
2823         return ino;
2824 }
2825
2826 struct inode *lookup_free_ino_inode(struct btrfs_root *root,
2827                                     struct btrfs_path *path)
2828 {
2829         struct inode *inode = NULL;
2830
2831         spin_lock(&root->cache_lock);
2832         if (root->cache_inode)
2833                 inode = igrab(root->cache_inode);
2834         spin_unlock(&root->cache_lock);
2835         if (inode)
2836                 return inode;
2837
2838         inode = __lookup_free_space_inode(root, path, 0);
2839         if (IS_ERR(inode))
2840                 return inode;
2841
2842         spin_lock(&root->cache_lock);
2843         if (!btrfs_fs_closing(root->fs_info))
2844                 root->cache_inode = igrab(inode);
2845         spin_unlock(&root->cache_lock);
2846
2847         return inode;
2848 }
2849
2850 int create_free_ino_inode(struct btrfs_root *root,
2851                           struct btrfs_trans_handle *trans,
2852                           struct btrfs_path *path)
2853 {
2854         return __create_free_space_inode(root, trans, path,
2855                                          BTRFS_FREE_INO_OBJECTID, 0);
2856 }
2857
2858 int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2859 {
2860         struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2861         struct btrfs_path *path;
2862         struct inode *inode;
2863         int ret = 0;
2864         u64 root_gen = btrfs_root_generation(&root->root_item);
2865
2866         if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2867                 return 0;
2868
2869         /*
2870          * If we're unmounting then just return, since this does a search on the
2871          * normal root and not the commit root and we could deadlock.
2872          */
2873         if (btrfs_fs_closing(fs_info))
2874                 return 0;
2875
2876         path = btrfs_alloc_path();
2877         if (!path)
2878                 return 0;
2879
2880         inode = lookup_free_ino_inode(root, path);
2881         if (IS_ERR(inode))
2882                 goto out;
2883
2884         if (root_gen != BTRFS_I(inode)->generation)
2885                 goto out_put;
2886
2887         ret = __load_free_space_cache(root, inode, ctl, path, 0);
2888
2889         if (ret < 0)
2890                 printk(KERN_ERR "btrfs: failed to load free ino cache for "
2891                        "root %llu\n", root->root_key.objectid);
2892 out_put:
2893         iput(inode);
2894 out:
2895         btrfs_free_path(path);
2896         return ret;
2897 }
2898
2899 int btrfs_write_out_ino_cache(struct btrfs_root *root,
2900                               struct btrfs_trans_handle *trans,
2901                               struct btrfs_path *path)
2902 {
2903         struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
2904         struct inode *inode;
2905         int ret;
2906
2907         if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2908                 return 0;
2909
2910         inode = lookup_free_ino_inode(root, path);
2911         if (IS_ERR(inode))
2912                 return 0;
2913
2914         ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
2915         if (ret) {
2916                 btrfs_delalloc_release_metadata(inode, inode->i_size);
2917 #ifdef DEBUG
2918                 printk(KERN_ERR "btrfs: failed to write free ino cache "
2919                        "for root %llu\n", root->root_key.objectid);
2920 #endif
2921         }
2922
2923         iput(inode);
2924         return ret;
2925 }