fs/btrfs/inode.c (karo-tx-linux.git, blob at commit "Btrfs: Direct I/O: Fix space accounting")
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mpage.h>
31 #include <linux/swap.h>
32 #include <linux/writeback.h>
33 #include <linux/statfs.h>
34 #include <linux/compat.h>
35 #include <linux/bit_spinlock.h>
36 #include <linux/xattr.h>
37 #include <linux/posix_acl.h>
38 #include <linux/falloc.h>
39 #include <linux/slab.h>
40 #include <linux/ratelimit.h>
41 #include <linux/mount.h>
42 #include <linux/btrfs.h>
43 #include <linux/blkdev.h>
44 #include <linux/posix_acl_xattr.h>
45 #include <linux/uio.h>
46 #include "ctree.h"
47 #include "disk-io.h"
48 #include "transaction.h"
49 #include "btrfs_inode.h"
50 #include "print-tree.h"
51 #include "ordered-data.h"
52 #include "xattr.h"
53 #include "tree-log.h"
54 #include "volumes.h"
55 #include "compression.h"
56 #include "locking.h"
57 #include "free-space-cache.h"
58 #include "inode-map.h"
59 #include "backref.h"
60 #include "hash.h"
61 #include "props.h"
62 #include "qgroup.h"
63
64 struct btrfs_iget_args {
65         struct btrfs_key *location;
66         struct btrfs_root *root;
67 };
68
69 static const struct inode_operations btrfs_dir_inode_operations;
70 static const struct inode_operations btrfs_symlink_inode_operations;
71 static const struct inode_operations btrfs_dir_ro_inode_operations;
72 static const struct inode_operations btrfs_special_inode_operations;
73 static const struct inode_operations btrfs_file_inode_operations;
74 static const struct address_space_operations btrfs_aops;
75 static const struct address_space_operations btrfs_symlink_aops;
76 static const struct file_operations btrfs_dir_file_operations;
77 static struct extent_io_ops btrfs_extent_io_ops;
78
79 static struct kmem_cache *btrfs_inode_cachep;
80 static struct kmem_cache *btrfs_delalloc_work_cachep;
81 struct kmem_cache *btrfs_trans_handle_cachep;
82 struct kmem_cache *btrfs_transaction_cachep;
83 struct kmem_cache *btrfs_path_cachep;
84 struct kmem_cache *btrfs_free_space_cachep;
85
86 #define S_SHIFT 12
87 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
88         [S_IFREG >> S_SHIFT]    = BTRFS_FT_REG_FILE,
89         [S_IFDIR >> S_SHIFT]    = BTRFS_FT_DIR,
90         [S_IFCHR >> S_SHIFT]    = BTRFS_FT_CHRDEV,
91         [S_IFBLK >> S_SHIFT]    = BTRFS_FT_BLKDEV,
92         [S_IFIFO >> S_SHIFT]    = BTRFS_FT_FIFO,
93         [S_IFSOCK >> S_SHIFT]   = BTRFS_FT_SOCK,
94         [S_IFLNK >> S_SHIFT]    = BTRFS_FT_SYMLINK,
95 };
96
97 static int btrfs_setsize(struct inode *inode, struct iattr *attr);
98 static int btrfs_truncate(struct inode *inode);
99 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
100 static noinline int cow_file_range(struct inode *inode,
101                                    struct page *locked_page,
102                                    u64 start, u64 end, int *page_started,
103                                    unsigned long *nr_written, int unlock);
104 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
105                                            u64 len, u64 orig_start,
106                                            u64 block_start, u64 block_len,
107                                            u64 orig_block_len, u64 ram_bytes,
108                                            int type);
109
110 static int btrfs_dirty_inode(struct inode *inode);
111
112 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
113 void btrfs_test_inode_set_ops(struct inode *inode)
114 {
115         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
116 }
117 #endif
118
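/*
 * Initialize ACLs and security xattrs for a newly created inode, based
 * on its parent directory.
 */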
119 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
120                                      struct inode *inode,  struct inode *dir,
121                                      const struct qstr *qstr)
122 {
123         int err;
124
125         err = btrfs_init_acl(trans, inode, dir);
126         if (!err)
127                 err = btrfs_xattr_security_init(trans, inode, dir, qstr);
128         return err;
129 }
130
131 /*
132  * this does all the hard work for inserting an inline extent into
133  * the btree.  The caller should have done a btrfs_drop_extents so that
134  * no overlapping inline items exist in the btree
135  */
136 static int insert_inline_extent(struct btrfs_trans_handle *trans,
137                                 struct btrfs_path *path, int extent_inserted,
138                                 struct btrfs_root *root, struct inode *inode,
139                                 u64 start, size_t size, size_t compressed_size,
140                                 int compress_type,
141                                 struct page **compressed_pages)
142 {
143         struct extent_buffer *leaf;
144         struct page *page = NULL;
145         char *kaddr;
146         unsigned long ptr;
147         struct btrfs_file_extent_item *ei;
148         int err = 0;
149         int ret;
150         size_t cur_size = size;
151         unsigned long offset;
152
153         if (compressed_size && compressed_pages)
154                 cur_size = compressed_size;
155
156         inode_add_bytes(inode, size);
157
158         if (!extent_inserted) {
159                 struct btrfs_key key;
160                 size_t datasize;
161
162                 key.objectid = btrfs_ino(inode);
163                 key.offset = start;
164                 key.type = BTRFS_EXTENT_DATA_KEY;
165
166                 datasize = btrfs_file_extent_calc_inline_size(cur_size);
167                 path->leave_spinning = 1;
168                 ret = btrfs_insert_empty_item(trans, root, path, &key,
169                                               datasize);
170                 if (ret) {
171                         err = ret;
172                         goto fail;
173                 }
174         }
175         leaf = path->nodes[0];
176         ei = btrfs_item_ptr(leaf, path->slots[0],
177                             struct btrfs_file_extent_item);
178         btrfs_set_file_extent_generation(leaf, ei, trans->transid);
179         btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
180         btrfs_set_file_extent_encryption(leaf, ei, 0);
181         btrfs_set_file_extent_other_encoding(leaf, ei, 0);
182         btrfs_set_file_extent_ram_bytes(leaf, ei, size);
183         ptr = btrfs_file_extent_inline_start(ei);
184
185         if (compress_type != BTRFS_COMPRESS_NONE) {
186                 struct page *cpage;
187                 int i = 0;
188                 while (compressed_size > 0) {
189                         cpage = compressed_pages[i];
190                         cur_size = min_t(unsigned long, compressed_size,
191                                        PAGE_CACHE_SIZE);
192
193                         kaddr = kmap_atomic(cpage);
194                         write_extent_buffer(leaf, kaddr, ptr, cur_size);
195                         kunmap_atomic(kaddr);
196
197                         i++;
198                         ptr += cur_size;
199                         compressed_size -= cur_size;
200                 }
201                 btrfs_set_file_extent_compression(leaf, ei,
202                                                   compress_type);
203         } else {
204                 page = find_get_page(inode->i_mapping,
205                                      start >> PAGE_CACHE_SHIFT);
206                 btrfs_set_file_extent_compression(leaf, ei, 0);
207                 kaddr = kmap_atomic(page);
208                 offset = start & (PAGE_CACHE_SIZE - 1);
209                 write_extent_buffer(leaf, kaddr + offset, ptr, size);
210                 kunmap_atomic(kaddr);
211                 page_cache_release(page);
212         }
213         btrfs_mark_buffer_dirty(leaf);
214         btrfs_release_path(path);
215
216         /*
217          * we're an inline extent, so nobody can
218          * extend the file past i_size without locking
219          * a page we already have locked.
220          *
221          * We must do any isize and inode updates
222          * before we unlock the pages.  Otherwise we
223          * could end up racing with unlink.
224          */
225         BTRFS_I(inode)->disk_i_size = inode->i_size;
226         ret = btrfs_update_inode(trans, root, inode);
227
228         return ret;
229 fail:
230         return err;
231 }
232
233
234 /*
235  * conditionally insert an inline extent into the file.  This
236  * does the checks required to make sure the data is small enough
237  * to fit as an inline extent.
238  */
239 static noinline int cow_file_range_inline(struct btrfs_root *root,
240                                           struct inode *inode, u64 start,
241                                           u64 end, size_t compressed_size,
242                                           int compress_type,
243                                           struct page **compressed_pages)
244 {
245         struct btrfs_trans_handle *trans;
246         u64 isize = i_size_read(inode);
247         u64 actual_end = min(end + 1, isize);
248         u64 inline_len = actual_end - start;
249         u64 aligned_end = ALIGN(end, root->sectorsize);
250         u64 data_len = inline_len;
251         int ret;
252         struct btrfs_path *path;
253         int extent_inserted = 0;
254         u32 extent_item_size;
255
256         if (compressed_size)
257                 data_len = compressed_size;
258
259         if (start > 0 ||
260             actual_end > PAGE_CACHE_SIZE ||
261             data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
262             (!compressed_size &&
263             (actual_end & (root->sectorsize - 1)) == 0) ||
264             end + 1 < isize ||
265             data_len > root->fs_info->max_inline) {
266                 return 1;
267         }
268
269         path = btrfs_alloc_path();
270         if (!path)
271                 return -ENOMEM;
272
273         trans = btrfs_join_transaction(root);
274         if (IS_ERR(trans)) {
275                 btrfs_free_path(path);
276                 return PTR_ERR(trans);
277         }
278         trans->block_rsv = &root->fs_info->delalloc_block_rsv;
279
280         if (compressed_size && compressed_pages)
281                 extent_item_size = btrfs_file_extent_calc_inline_size(
282                    compressed_size);
283         else
284                 extent_item_size = btrfs_file_extent_calc_inline_size(
285                     inline_len);
286
287         ret = __btrfs_drop_extents(trans, root, inode, path,
288                                    start, aligned_end, NULL,
289                                    1, 1, extent_item_size, &extent_inserted);
290         if (ret) {
291                 btrfs_abort_transaction(trans, root, ret);
292                 goto out;
293         }
294
295         if (isize > actual_end)
296                 inline_len = min_t(u64, isize, actual_end);
297         ret = insert_inline_extent(trans, path, extent_inserted,
298                                    root, inode, start,
299                                    inline_len, compressed_size,
300                                    compress_type, compressed_pages);
301         if (ret && ret != -ENOSPC) {
302                 btrfs_abort_transaction(trans, root, ret);
303                 goto out;
304         } else if (ret == -ENOSPC) {
305                 ret = 1;
306                 goto out;
307         }
308
309         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
310         btrfs_delalloc_release_metadata(inode, end + 1 - start);
311         btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
312 out:
313         btrfs_free_path(path);
314         btrfs_end_transaction(trans, root);
315         return ret;
316 }
317
318 struct async_extent {
319         u64 start;
320         u64 ram_size;
321         u64 compressed_size;
322         struct page **pages;
323         unsigned long nr_pages;
324         int compress_type;
325         struct list_head list;
326 };
327
328 struct async_cow {
329         struct inode *inode;
330         struct btrfs_root *root;
331         struct page *locked_page;
332         u64 start;
333         u64 end;
334         struct list_head extents;
335         struct btrfs_work work;
336 };
337
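/*
 * record a range (compressed or not) as an async_extent on the async_cow
 * work item, to be allocated and submitted later by
 * submit_compressed_extents()
 */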
338 static noinline int add_async_extent(struct async_cow *cow,
339                                      u64 start, u64 ram_size,
340                                      u64 compressed_size,
341                                      struct page **pages,
342                                      unsigned long nr_pages,
343                                      int compress_type)
344 {
345         struct async_extent *async_extent;
346
347         async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
348         BUG_ON(!async_extent); /* -ENOMEM */
349         async_extent->start = start;
350         async_extent->ram_size = ram_size;
351         async_extent->compressed_size = compressed_size;
352         async_extent->pages = pages;
353         async_extent->nr_pages = nr_pages;
354         async_extent->compress_type = compress_type;
355         list_add_tail(&async_extent->list, &cow->extents);
356         return 0;
357 }
358
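/*
 * decide whether writes to this inode should go through the compression
 * path, based on the compress/force-compress mount options and the
 * per-inode COMPRESS/NOCOMPRESS flags
 */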
359 static inline int inode_need_compress(struct inode *inode)
360 {
361         struct btrfs_root *root = BTRFS_I(inode)->root;
362
363         /* force compress */
364         if (btrfs_test_opt(root, FORCE_COMPRESS))
365                 return 1;
366         /* bad compression ratios */
367         if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
368                 return 0;
369         if (btrfs_test_opt(root, COMPRESS) ||
370             BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
371             BTRFS_I(inode)->force_compress)
372                 return 1;
373         return 0;
374 }
375
376 /*
377  * we create compressed extents in two phases.  The first
378  * phase compresses a range of pages that have already been
379  * locked (both pages and state bits are locked).
380  *
381  * This is done inside an ordered work queue, and the compression
382  * is spread across many cpus.  The actual IO submission is step
383  * two, and the ordered work queue takes care of making sure that
384  * happens in the same order things were put onto the queue by
385  * writepages and friends.
386  *
387  * If this code finds it can't get good compression, it puts an
388  * entry onto the work queue to write the uncompressed bytes.  This
389  * makes sure that both compressed inodes and uncompressed inodes
390  * are written in the same order that the flusher thread sent them
391  * down.
392  */
393 static noinline void compress_file_range(struct inode *inode,
394                                         struct page *locked_page,
395                                         u64 start, u64 end,
396                                         struct async_cow *async_cow,
397                                         int *num_added)
398 {
399         struct btrfs_root *root = BTRFS_I(inode)->root;
400         u64 num_bytes;
401         u64 blocksize = root->sectorsize;
402         u64 actual_end;
403         u64 isize = i_size_read(inode);
404         int ret = 0;
405         struct page **pages = NULL;
406         unsigned long nr_pages;
407         unsigned long nr_pages_ret = 0;
408         unsigned long total_compressed = 0;
409         unsigned long total_in = 0;
410         unsigned long max_compressed = 128 * 1024;
411         unsigned long max_uncompressed = 128 * 1024;
412         int i;
413         int will_compress;
414         int compress_type = root->fs_info->compress_type;
415         int redirty = 0;
416
417         /* if this is a small write inside eof, kick off a defrag */
418         if ((end - start + 1) < 16 * 1024 &&
419             (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
420                 btrfs_add_inode_defrag(NULL, inode);
421
422         actual_end = min_t(u64, isize, end + 1);
423 again:
424         will_compress = 0;
425         nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
426         nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
427
428         /*
429          * we don't want to send crud past the end of i_size through
430          * compression, that's just a waste of CPU time.  So, if the
431          * end of the file is before the start of our current
432          * requested range of bytes, we bail out to the uncompressed
433          * cleanup code that can deal with all of this.
434          *
435          * It isn't really the fastest way to fix things, but this is a
436          * very uncommon corner.
437          */
438         if (actual_end <= start)
439                 goto cleanup_and_bail_uncompressed;
440
441         total_compressed = actual_end - start;
442
443         /*
444          * skip compression for a small file range (<= blocksize) that
445          * isn't an inline extent, since it doesn't save disk space at all.
446          */
447         if (total_compressed <= blocksize &&
448            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
449                 goto cleanup_and_bail_uncompressed;
450
451         /* we want to make sure that the amount of ram required to uncompress
452          * an extent is reasonable, so we limit the total size in ram
453          * of a compressed extent to 128k.  This is a crucial number
454          * because it also controls how easily we can spread reads across
455          * cpus for decompression.
456          *
457          * We also want to make sure the amount of IO required to do
458          * a random read is reasonably small, so we limit the size of
459          * a compressed extent to 128k.
460          */
461         total_compressed = min(total_compressed, max_uncompressed);
462         num_bytes = ALIGN(end - start + 1, blocksize);
463         num_bytes = max(blocksize,  num_bytes);
464         total_in = 0;
465         ret = 0;
466
467         /*
468          * we do compression for mount -o compress and when the
469          * inode has not been flagged as nocompress.  This flag can
470          * change at any time if we discover bad compression ratios.
471          */
472         if (inode_need_compress(inode)) {
473                 WARN_ON(pages);
474                 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
475                 if (!pages) {
476                         /* just bail out to the uncompressed code */
477                         goto cont;
478                 }
479
480                 if (BTRFS_I(inode)->force_compress)
481                         compress_type = BTRFS_I(inode)->force_compress;
482
483                 /*
484                  * we need to call clear_page_dirty_for_io on each
485                  * page in the range.  Otherwise applications with the file
486                  * mmap'd can wander in and change the page contents while
487                  * we are compressing them.
488                  *
489                  * If the compression fails for any reason, we set the pages
490                  * dirty again later on.
491                  */
492                 extent_range_clear_dirty_for_io(inode, start, end);
493                 redirty = 1;
494                 ret = btrfs_compress_pages(compress_type,
495                                            inode->i_mapping, start,
496                                            total_compressed, pages,
497                                            nr_pages, &nr_pages_ret,
498                                            &total_in,
499                                            &total_compressed,
500                                            max_compressed);
501
502                 if (!ret) {
503                         unsigned long offset = total_compressed &
504                                 (PAGE_CACHE_SIZE - 1);
505                         struct page *page = pages[nr_pages_ret - 1];
506                         char *kaddr;
507
508                         /* zero the tail end of the last page, we might be
509                          * sending it down to disk
510                          */
511                         if (offset) {
512                                 kaddr = kmap_atomic(page);
513                                 memset(kaddr + offset, 0,
514                                        PAGE_CACHE_SIZE - offset);
515                                 kunmap_atomic(kaddr);
516                         }
517                         will_compress = 1;
518                 }
519         }
520 cont:
521         if (start == 0) {
522                 /* let's try to make an inline extent */
523                 if (ret || total_in < (actual_end - start)) {
524                         /* we didn't compress the entire range, try
525                          * to make an uncompressed inline extent.
526                          */
527                         ret = cow_file_range_inline(root, inode, start, end,
528                                                     0, 0, NULL);
529                 } else {
530                         /* try making a compressed inline extent */
531                         ret = cow_file_range_inline(root, inode, start, end,
532                                                     total_compressed,
533                                                     compress_type, pages);
534                 }
535                 if (ret <= 0) {
536                         unsigned long clear_flags = EXTENT_DELALLOC |
537                                 EXTENT_DEFRAG;
538                         unsigned long page_error_op;
539
540                         clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
541                         page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;
542
543                         /*
544                          * inline extent creation worked or returned error,
545                          * we don't need to create any more async work items.
546                          * Unlock and free up our temp pages.
547                          */
548                         extent_clear_unlock_delalloc(inode, start, end, NULL,
549                                                      clear_flags, PAGE_UNLOCK |
550                                                      PAGE_CLEAR_DIRTY |
551                                                      PAGE_SET_WRITEBACK |
552                                                      page_error_op |
553                                                      PAGE_END_WRITEBACK);
554                         goto free_pages_out;
555                 }
556         }
557
558         if (will_compress) {
559                 /*
560                  * we aren't doing an inline extent, so round the compressed size
561                  * up to a block size boundary so the allocator does sane
562                  * things
563                  */
564                 total_compressed = ALIGN(total_compressed, blocksize);
565
566                 /*
567                  * one last check to make sure the compression is really a
568                  * win, compare the page count read with the blocks on disk
569                  */
570                 total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
571                 if (total_compressed >= total_in) {
572                         will_compress = 0;
573                 } else {
574                         num_bytes = total_in;
575                 }
576         }
577         if (!will_compress && pages) {
578                 /*
579                  * the compression code ran but failed to make things smaller,
580                  * free any pages it allocated and our page pointer array
581                  */
582                 for (i = 0; i < nr_pages_ret; i++) {
583                         WARN_ON(pages[i]->mapping);
584                         page_cache_release(pages[i]);
585                 }
586                 kfree(pages);
587                 pages = NULL;
588                 total_compressed = 0;
589                 nr_pages_ret = 0;
590
591                 /* flag the file so we don't compress in the future */
592                 if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
593                     !(BTRFS_I(inode)->force_compress)) {
594                         BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
595                 }
596         }
597         if (will_compress) {
598                 *num_added += 1;
599
600                 /* the async work queues will take care of doing actual
601                  * allocation on disk for these compressed pages,
602                  * and will submit them to the elevator.
603                  */
604                 add_async_extent(async_cow, start, num_bytes,
605                                  total_compressed, pages, nr_pages_ret,
606                                  compress_type);
607
608                 if (start + num_bytes < end) {
609                         start += num_bytes;
610                         pages = NULL;
611                         cond_resched();
612                         goto again;
613                 }
614         } else {
615 cleanup_and_bail_uncompressed:
616                 /*
617                  * No compression, but we still need to write the pages in
618                  * the file we've been given so far.  redirty the locked
619                  * page if it corresponds to our extent and set things up
620                  * for the async work queue to run cow_file_range to do
621                  * the normal delalloc dance
622                  */
623                 if (page_offset(locked_page) >= start &&
624                     page_offset(locked_page) <= end) {
625                         __set_page_dirty_nobuffers(locked_page);
626                         /* unlocked later on in the async handlers */
627                 }
628                 if (redirty)
629                         extent_range_redirty_for_io(inode, start, end);
630                 add_async_extent(async_cow, start, end - start + 1,
631                                  0, NULL, 0, BTRFS_COMPRESS_NONE);
632                 *num_added += 1;
633         }
634
635         return;
636
637 free_pages_out:
638         for (i = 0; i < nr_pages_ret; i++) {
639                 WARN_ON(pages[i]->mapping);
640                 page_cache_release(pages[i]);
641         }
642         kfree(pages);
643 }
644
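/*
 * drop the page references held by an async_extent and free its page
 * array
 */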
645 static void free_async_extent_pages(struct async_extent *async_extent)
646 {
647         int i;
648
649         if (!async_extent->pages)
650                 return;
651
652         for (i = 0; i < async_extent->nr_pages; i++) {
653                 WARN_ON(async_extent->pages[i]->mapping);
654                 page_cache_release(async_extent->pages[i]);
655         }
656         kfree(async_extent->pages);
657         async_extent->nr_pages = 0;
658         async_extent->pages = NULL;
659 }
660
661 /*
662  * phase two of compressed writeback.  This is the ordered portion
663  * of the code, which only gets called in the order the work was
664  * queued.  We walk all the async extents created by compress_file_range
665  * and send them down to the disk.
666  */
667 static noinline void submit_compressed_extents(struct inode *inode,
668                                               struct async_cow *async_cow)
669 {
670         struct async_extent *async_extent;
671         u64 alloc_hint = 0;
672         struct btrfs_key ins;
673         struct extent_map *em;
674         struct btrfs_root *root = BTRFS_I(inode)->root;
675         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
676         struct extent_io_tree *io_tree;
677         int ret = 0;
678
679 again:
680         while (!list_empty(&async_cow->extents)) {
681                 async_extent = list_entry(async_cow->extents.next,
682                                           struct async_extent, list);
683                 list_del(&async_extent->list);
684
685                 io_tree = &BTRFS_I(inode)->io_tree;
686
687 retry:
688                 /* did the compression code fall back to uncompressed IO? */
689                 if (!async_extent->pages) {
690                         int page_started = 0;
691                         unsigned long nr_written = 0;
692
693                         lock_extent(io_tree, async_extent->start,
694                                          async_extent->start +
695                                          async_extent->ram_size - 1);
696
697                         /* allocate blocks */
698                         ret = cow_file_range(inode, async_cow->locked_page,
699                                              async_extent->start,
700                                              async_extent->start +
701                                              async_extent->ram_size - 1,
702                                              &page_started, &nr_written, 0);
703
704                         /* JDM XXX */
705
706                         /*
707                          * if page_started, cow_file_range inserted an
708                          * inline extent and took care of all the unlocking
709                          * and IO for us.  Otherwise, we need to submit
710                          * all those pages down to the drive.
711                          */
712                         if (!page_started && !ret)
713                                 extent_write_locked_range(io_tree,
714                                                   inode, async_extent->start,
715                                                   async_extent->start +
716                                                   async_extent->ram_size - 1,
717                                                   btrfs_get_extent,
718                                                   WB_SYNC_ALL);
719                         else if (ret)
720                                 unlock_page(async_cow->locked_page);
721                         kfree(async_extent);
722                         cond_resched();
723                         continue;
724                 }
725
726                 lock_extent(io_tree, async_extent->start,
727                             async_extent->start + async_extent->ram_size - 1);
728
729                 ret = btrfs_reserve_extent(root,
730                                            async_extent->compressed_size,
731                                            async_extent->compressed_size,
732                                            0, alloc_hint, &ins, 1, 1);
733                 if (ret) {
734                         free_async_extent_pages(async_extent);
735
736                         if (ret == -ENOSPC) {
737                                 unlock_extent(io_tree, async_extent->start,
738                                               async_extent->start +
739                                               async_extent->ram_size - 1);
740
741                                 /*
742                                  * we need to redirty the pages if we decide to
743                                  * fall back to uncompressed IO, otherwise we
744                                  * will not submit these pages down to lower
745                                  * layers.
746                                  */
747                                 extent_range_redirty_for_io(inode,
748                                                 async_extent->start,
749                                                 async_extent->start +
750                                                 async_extent->ram_size - 1);
751
752                                 goto retry;
753                         }
754                         goto out_free;
755                 }
756                 /*
757                  * here we're doing allocation and writeback of the
758                  * compressed pages
759                  */
760                 btrfs_drop_extent_cache(inode, async_extent->start,
761                                         async_extent->start +
762                                         async_extent->ram_size - 1, 0);
763
764                 em = alloc_extent_map();
765                 if (!em) {
766                         ret = -ENOMEM;
767                         goto out_free_reserve;
768                 }
769                 em->start = async_extent->start;
770                 em->len = async_extent->ram_size;
771                 em->orig_start = em->start;
772                 em->mod_start = em->start;
773                 em->mod_len = em->len;
774
775                 em->block_start = ins.objectid;
776                 em->block_len = ins.offset;
777                 em->orig_block_len = ins.offset;
778                 em->ram_bytes = async_extent->ram_size;
779                 em->bdev = root->fs_info->fs_devices->latest_bdev;
780                 em->compress_type = async_extent->compress_type;
781                 set_bit(EXTENT_FLAG_PINNED, &em->flags);
782                 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
783                 em->generation = -1;
784
785                 while (1) {
786                         write_lock(&em_tree->lock);
787                         ret = add_extent_mapping(em_tree, em, 1);
788                         write_unlock(&em_tree->lock);
789                         if (ret != -EEXIST) {
790                                 free_extent_map(em);
791                                 break;
792                         }
793                         btrfs_drop_extent_cache(inode, async_extent->start,
794                                                 async_extent->start +
795                                                 async_extent->ram_size - 1, 0);
796                 }
797
798                 if (ret)
799                         goto out_free_reserve;
800
801                 ret = btrfs_add_ordered_extent_compress(inode,
802                                                 async_extent->start,
803                                                 ins.objectid,
804                                                 async_extent->ram_size,
805                                                 ins.offset,
806                                                 BTRFS_ORDERED_COMPRESSED,
807                                                 async_extent->compress_type);
808                 if (ret) {
809                         btrfs_drop_extent_cache(inode, async_extent->start,
810                                                 async_extent->start +
811                                                 async_extent->ram_size - 1, 0);
812                         goto out_free_reserve;
813                 }
814
815                 /*
816                  * clear dirty, set writeback and unlock the pages.
817                  */
818                 extent_clear_unlock_delalloc(inode, async_extent->start,
819                                 async_extent->start +
820                                 async_extent->ram_size - 1,
821                                 NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
822                                 PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
823                                 PAGE_SET_WRITEBACK);
824                 ret = btrfs_submit_compressed_write(inode,
825                                     async_extent->start,
826                                     async_extent->ram_size,
827                                     ins.objectid,
828                                     ins.offset, async_extent->pages,
829                                     async_extent->nr_pages);
830                 if (ret) {
831                         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
832                         struct page *p = async_extent->pages[0];
833                         const u64 start = async_extent->start;
834                         const u64 end = start + async_extent->ram_size - 1;
835
836                         p->mapping = inode->i_mapping;
837                         tree->ops->writepage_end_io_hook(p, start, end,
838                                                          NULL, 0);
839                         p->mapping = NULL;
840                         extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
841                                                      PAGE_END_WRITEBACK |
842                                                      PAGE_SET_ERROR);
843                         free_async_extent_pages(async_extent);
844                 }
845                 alloc_hint = ins.objectid + ins.offset;
846                 kfree(async_extent);
847                 cond_resched();
848         }
849         return;
850 out_free_reserve:
851         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
852 out_free:
853         extent_clear_unlock_delalloc(inode, async_extent->start,
854                                      async_extent->start +
855                                      async_extent->ram_size - 1,
856                                      NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
857                                      EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
858                                      PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
859                                      PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
860                                      PAGE_SET_ERROR);
861         free_async_extent_pages(async_extent);
862         kfree(async_extent);
863         goto again;
864 }
865
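/*
 * look for an existing extent mapping around @start and use its
 * block_start as an allocation hint, so new extents are placed near
 * data the inode already has on disk
 */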
866 static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
867                                       u64 num_bytes)
868 {
869         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
870         struct extent_map *em;
871         u64 alloc_hint = 0;
872
873         read_lock(&em_tree->lock);
874         em = search_extent_mapping(em_tree, start, num_bytes);
875         if (em) {
876                 /*
877                  * if block start isn't an actual block number then find the
878                  * first block in this inode and use that as a hint.  If that
879                  * block is also bogus then just don't worry about it.
880                  */
881                 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
882                         free_extent_map(em);
883                         em = search_extent_mapping(em_tree, 0, 0);
884                         if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
885                                 alloc_hint = em->block_start;
886                         if (em)
887                                 free_extent_map(em);
888                 } else {
889                         alloc_hint = em->block_start;
890                         free_extent_map(em);
891                 }
892         }
893         read_unlock(&em_tree->lock);
894
895         return alloc_hint;
896 }
897
898 /*
899  * when extent_io.c finds a delayed allocation range in the file,
900  * the callbacks end up in this code.  The basic idea is to
901  * allocate extents on disk for the range, and create ordered data structs
902  * in ram to track those extents.
903  *
904  * locked_page is the page that writepage had locked already.  We use
905  * it to make sure we don't do extra locks or unlocks.
906  *
907  * *page_started is set to one if we unlock locked_page and do everything
908  * required to start IO on it.  It may be clean and already done with
909  * IO when we return.
910  */
911 static noinline int cow_file_range(struct inode *inode,
912                                    struct page *locked_page,
913                                    u64 start, u64 end, int *page_started,
914                                    unsigned long *nr_written,
915                                    int unlock)
916 {
917         struct btrfs_root *root = BTRFS_I(inode)->root;
918         u64 alloc_hint = 0;
919         u64 num_bytes;
920         unsigned long ram_size;
921         u64 disk_num_bytes;
922         u64 cur_alloc_size;
923         u64 blocksize = root->sectorsize;
924         struct btrfs_key ins;
925         struct extent_map *em;
926         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
927         int ret = 0;
928
929         if (btrfs_is_free_space_inode(inode)) {
930                 WARN_ON_ONCE(1);
931                 ret = -EINVAL;
932                 goto out_unlock;
933         }
934
935         num_bytes = ALIGN(end - start + 1, blocksize);
936         num_bytes = max(blocksize,  num_bytes);
937         disk_num_bytes = num_bytes;
938
939         /* if this is a small write inside eof, kick off defrag */
940         if (num_bytes < 64 * 1024 &&
941             (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
942                 btrfs_add_inode_defrag(NULL, inode);
943
944         if (start == 0) {
945                 /* let's try to make an inline extent */
946                 ret = cow_file_range_inline(root, inode, start, end, 0, 0,
947                                             NULL);
948                 if (ret == 0) {
949                         extent_clear_unlock_delalloc(inode, start, end, NULL,
950                                      EXTENT_LOCKED | EXTENT_DELALLOC |
951                                      EXTENT_DEFRAG, PAGE_UNLOCK |
952                                      PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
953                                      PAGE_END_WRITEBACK);
954
955                         *nr_written = *nr_written +
956                              (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
957                         *page_started = 1;
958                         goto out;
959                 } else if (ret < 0) {
960                         goto out_unlock;
961                 }
962         }
963
964         BUG_ON(disk_num_bytes >
965                btrfs_super_total_bytes(root->fs_info->super_copy));
966
967         alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
968         btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
969
970         while (disk_num_bytes > 0) {
971                 unsigned long op;
972
973                 cur_alloc_size = disk_num_bytes;
974                 ret = btrfs_reserve_extent(root, cur_alloc_size,
975                                            root->sectorsize, 0, alloc_hint,
976                                            &ins, 1, 1);
977                 if (ret < 0)
978                         goto out_unlock;
979
980                 em = alloc_extent_map();
981                 if (!em) {
982                         ret = -ENOMEM;
983                         goto out_reserve;
984                 }
985                 em->start = start;
986                 em->orig_start = em->start;
987                 ram_size = ins.offset;
988                 em->len = ins.offset;
989                 em->mod_start = em->start;
990                 em->mod_len = em->len;
991
992                 em->block_start = ins.objectid;
993                 em->block_len = ins.offset;
994                 em->orig_block_len = ins.offset;
995                 em->ram_bytes = ram_size;
996                 em->bdev = root->fs_info->fs_devices->latest_bdev;
997                 set_bit(EXTENT_FLAG_PINNED, &em->flags);
998                 em->generation = -1;
999
1000                 while (1) {
1001                         write_lock(&em_tree->lock);
1002                         ret = add_extent_mapping(em_tree, em, 1);
1003                         write_unlock(&em_tree->lock);
1004                         if (ret != -EEXIST) {
1005                                 free_extent_map(em);
1006                                 break;
1007                         }
1008                         btrfs_drop_extent_cache(inode, start,
1009                                                 start + ram_size - 1, 0);
1010                 }
1011                 if (ret)
1012                         goto out_reserve;
1013
1014                 cur_alloc_size = ins.offset;
1015                 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
1016                                                ram_size, cur_alloc_size, 0);
1017                 if (ret)
1018                         goto out_drop_extent_cache;
1019
1020                 if (root->root_key.objectid ==
1021                     BTRFS_DATA_RELOC_TREE_OBJECTID) {
1022                         ret = btrfs_reloc_clone_csums(inode, start,
1023                                                       cur_alloc_size);
1024                         if (ret)
1025                                 goto out_drop_extent_cache;
1026                 }
1027
1028                 if (disk_num_bytes < cur_alloc_size)
1029                         break;
1030
1031                 /* we're not doing compressed IO, don't unlock the first
1032                  * page (which the caller expects to stay locked), don't
1033                  * clear any dirty bits and don't set any writeback bits
1034                  *
1035                  * Do set the Private2 bit so we know this page was properly
1036                  * set up for writepage
1037                  */
1038                 op = unlock ? PAGE_UNLOCK : 0;
1039                 op |= PAGE_SET_PRIVATE2;
1040
1041                 extent_clear_unlock_delalloc(inode, start,
1042                                              start + ram_size - 1, locked_page,
1043                                              EXTENT_LOCKED | EXTENT_DELALLOC,
1044                                              op);
1045                 disk_num_bytes -= cur_alloc_size;
1046                 num_bytes -= cur_alloc_size;
1047                 alloc_hint = ins.objectid + ins.offset;
1048                 start += cur_alloc_size;
1049         }
1050 out:
1051         return ret;
1052
1053 out_drop_extent_cache:
1054         btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
1055 out_reserve:
1056         btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
1057 out_unlock:
1058         extent_clear_unlock_delalloc(inode, start, end, locked_page,
1059                                      EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
1060                                      EXTENT_DELALLOC | EXTENT_DEFRAG,
1061                                      PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
1062                                      PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
1063         goto out;
1064 }
1065
1066 /*
1067  * work queue callback to start compression on a file and pages
1068  */
1069 static noinline void async_cow_start(struct btrfs_work *work)
1070 {
1071         struct async_cow *async_cow;
1072         int num_added = 0;
1073         async_cow = container_of(work, struct async_cow, work);
1074
1075         compress_file_range(async_cow->inode, async_cow->locked_page,
1076                             async_cow->start, async_cow->end, async_cow,
1077                             &num_added);
1078         if (num_added == 0) {
1079                 btrfs_add_delayed_iput(async_cow->inode);
1080                 async_cow->inode = NULL;
1081         }
1082 }
1083
1084 /*
1085  * work queue callback to submit previously compressed pages
1086  */
1087 static noinline void async_cow_submit(struct btrfs_work *work)
1088 {
1089         struct async_cow *async_cow;
1090         struct btrfs_root *root;
1091         unsigned long nr_pages;
1092
1093         async_cow = container_of(work, struct async_cow, work);
1094
1095         root = async_cow->root;
1096         nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
1097                 PAGE_CACHE_SHIFT;
1098
1099         if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) <
1100             5 * 1024 * 1024 &&
1101             waitqueue_active(&root->fs_info->async_submit_wait))
1102                 wake_up(&root->fs_info->async_submit_wait);
1103
1104         if (async_cow->inode)
1105                 submit_compressed_extents(async_cow->inode, async_cow);
1106 }
1107
1108 static noinline void async_cow_free(struct btrfs_work *work)
1109 {
1110         struct async_cow *async_cow;
1111         async_cow = container_of(work, struct async_cow, work);
1112         if (async_cow->inode)
1113                 btrfs_add_delayed_iput(async_cow->inode);
1114         kfree(async_cow);
1115 }
1116
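/*
 * split the delalloc range into chunks (up to 512k each when compression
 * may be used), queue one async_cow work item per chunk and throttle
 * against async_delalloc_pages so writeback doesn't queue unbounded work
 */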
1117 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
1118                                 u64 start, u64 end, int *page_started,
1119                                 unsigned long *nr_written)
1120 {
1121         struct async_cow *async_cow;
1122         struct btrfs_root *root = BTRFS_I(inode)->root;
1123         unsigned long nr_pages;
1124         u64 cur_end;
1125         int limit = 10 * 1024 * 1024;
1126
1127         clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
1128                          1, 0, NULL, GFP_NOFS);
1129         while (start < end) {
1130                 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
1131                 BUG_ON(!async_cow); /* -ENOMEM */
1132                 async_cow->inode = igrab(inode);
1133                 async_cow->root = root;
1134                 async_cow->locked_page = locked_page;
1135                 async_cow->start = start;
1136
1137                 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
1138                     !btrfs_test_opt(root, FORCE_COMPRESS))
1139                         cur_end = end;
1140                 else
1141                         cur_end = min(end, start + 512 * 1024 - 1);
1142
1143                 async_cow->end = cur_end;
1144                 INIT_LIST_HEAD(&async_cow->extents);
1145
1146                 btrfs_init_work(&async_cow->work,
1147                                 btrfs_delalloc_helper,
1148                                 async_cow_start, async_cow_submit,
1149                                 async_cow_free);
1150
1151                 nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
1152                         PAGE_CACHE_SHIFT;
1153                 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
1154
1155                 btrfs_queue_work(root->fs_info->delalloc_workers,
1156                                  &async_cow->work);
1157
1158                 if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
1159                         wait_event(root->fs_info->async_submit_wait,
1160                            (atomic_read(&root->fs_info->async_delalloc_pages) <
1161                             limit));
1162                 }
1163
1164                 while (atomic_read(&root->fs_info->async_submit_draining) &&
1165                       atomic_read(&root->fs_info->async_delalloc_pages)) {
1166                         wait_event(root->fs_info->async_submit_wait,
1167                           (atomic_read(&root->fs_info->async_delalloc_pages) ==
1168                            0));
1169                 }
1170
1171                 *nr_written += nr_pages;
1172                 start = cur_end + 1;
1173         }
1174         *page_started = 1;
1175         return 0;
1176 }
1177
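/*
 * return 0 when no checksums exist for the given byte range, 1 otherwise
 */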
1178 static noinline int csum_exist_in_range(struct btrfs_root *root,
1179                                         u64 bytenr, u64 num_bytes)
1180 {
1181         int ret;
1182         struct btrfs_ordered_sum *sums;
1183         LIST_HEAD(list);
1184
1185         ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
1186                                        bytenr + num_bytes - 1, &list, 0);
1187         if (ret == 0 && list_empty(&list))
1188                 return 0;
1189
1190         while (!list_empty(&list)) {
1191                 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
1192                 list_del(&sums->list);
1193                 kfree(sums);
1194         }
1195         return 1;
1196 }
1197
1198 /*
1199  * called during nocow writeback.  This checks for snapshots or COW copies
1200  * of the extents that exist in the file, and COWs the file as required.
1201  *
1202  * If no cow copies or snapshots exist, we write directly to the existing
1203  * blocks on disk
1204  */
1205 static noinline int run_delalloc_nocow(struct inode *inode,
1206                                        struct page *locked_page,
1207                               u64 start, u64 end, int *page_started, int force,
1208                               unsigned long *nr_written)
1209 {
1210         struct btrfs_root *root = BTRFS_I(inode)->root;
1211         struct btrfs_trans_handle *trans;
1212         struct extent_buffer *leaf;
1213         struct btrfs_path *path;
1214         struct btrfs_file_extent_item *fi;
1215         struct btrfs_key found_key;
1216         u64 cow_start;
1217         u64 cur_offset;
1218         u64 extent_end;
1219         u64 extent_offset;
1220         u64 disk_bytenr;
1221         u64 num_bytes;
1222         u64 disk_num_bytes;
1223         u64 ram_bytes;
1224         int extent_type;
1225         int ret, err;
1226         int type;
1227         int nocow;
1228         int check_prev = 1;
1229         bool nolock;
1230         u64 ino = btrfs_ino(inode);
1231
1232         path = btrfs_alloc_path();
1233         if (!path) {
1234                 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1235                                              EXTENT_LOCKED | EXTENT_DELALLOC |
1236                                              EXTENT_DO_ACCOUNTING |
1237                                              EXTENT_DEFRAG, PAGE_UNLOCK |
1238                                              PAGE_CLEAR_DIRTY |
1239                                              PAGE_SET_WRITEBACK |
1240                                              PAGE_END_WRITEBACK);
1241                 return -ENOMEM;
1242         }
1243
1244         nolock = btrfs_is_free_space_inode(inode);
1245
1246         if (nolock)
1247                 trans = btrfs_join_transaction_nolock(root);
1248         else
1249                 trans = btrfs_join_transaction(root);
1250
1251         if (IS_ERR(trans)) {
1252                 extent_clear_unlock_delalloc(inode, start, end, locked_page,
1253                                              EXTENT_LOCKED | EXTENT_DELALLOC |
1254                                              EXTENT_DO_ACCOUNTING |
1255                                              EXTENT_DEFRAG, PAGE_UNLOCK |
1256                                              PAGE_CLEAR_DIRTY |
1257                                              PAGE_SET_WRITEBACK |
1258                                              PAGE_END_WRITEBACK);
1259                 btrfs_free_path(path);
1260                 return PTR_ERR(trans);
1261         }
1262
1263         trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1264
1265         cow_start = (u64)-1;
1266         cur_offset = start;
1267         while (1) {
1268                 ret = btrfs_lookup_file_extent(trans, root, path, ino,
1269                                                cur_offset, 0);
1270                 if (ret < 0)
1271                         goto error;
1272                 if (ret > 0 && path->slots[0] > 0 && check_prev) {
1273                         leaf = path->nodes[0];
1274                         btrfs_item_key_to_cpu(leaf, &found_key,
1275                                               path->slots[0] - 1);
1276                         if (found_key.objectid == ino &&
1277                             found_key.type == BTRFS_EXTENT_DATA_KEY)
1278                                 path->slots[0]--;
1279                 }
1280                 check_prev = 0;
1281 next_slot:
1282                 leaf = path->nodes[0];
1283                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1284                         ret = btrfs_next_leaf(root, path);
1285                         if (ret < 0)
1286                                 goto error;
1287                         if (ret > 0)
1288                                 break;
1289                         leaf = path->nodes[0];
1290                 }
1291
1292                 nocow = 0;
1293                 disk_bytenr = 0;
1294                 num_bytes = 0;
1295                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1296
1297                 if (found_key.objectid > ino ||
1298                     found_key.type > BTRFS_EXTENT_DATA_KEY ||
1299                     found_key.offset > end)
1300                         break;
1301
1302                 if (found_key.offset > cur_offset) {
1303                         extent_end = found_key.offset;
1304                         extent_type = 0;
1305                         goto out_check;
1306                 }
1307
1308                 fi = btrfs_item_ptr(leaf, path->slots[0],
1309                                     struct btrfs_file_extent_item);
1310                 extent_type = btrfs_file_extent_type(leaf, fi);
1311
1312                 ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1313                 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1314                     extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1315                         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1316                         extent_offset = btrfs_file_extent_offset(leaf, fi);
1317                         extent_end = found_key.offset +
1318                                 btrfs_file_extent_num_bytes(leaf, fi);
1319                         disk_num_bytes =
1320                                 btrfs_file_extent_disk_num_bytes(leaf, fi);
1321                         if (extent_end <= start) {
1322                                 path->slots[0]++;
1323                                 goto next_slot;
1324                         }
1325                         if (disk_bytenr == 0)
1326                                 goto out_check;
1327                         if (btrfs_file_extent_compression(leaf, fi) ||
1328                             btrfs_file_extent_encryption(leaf, fi) ||
1329                             btrfs_file_extent_other_encoding(leaf, fi))
1330                                 goto out_check;
1331                         if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1332                                 goto out_check;
1333                         if (btrfs_extent_readonly(root, disk_bytenr))
1334                                 goto out_check;
1335                         if (btrfs_cross_ref_exist(trans, root, ino,
1336                                                   found_key.offset -
1337                                                   extent_offset, disk_bytenr))
1338                                 goto out_check;
1339                         disk_bytenr += extent_offset;
1340                         disk_bytenr += cur_offset - found_key.offset;
1341                         num_bytes = min(end + 1, extent_end) - cur_offset;
1342                         /*
1343                          * If there are pending snapshots for this root,
1344                          * we fall back to the common COW path.
1345                          */
1346                         if (!nolock) {
1347                                 err = btrfs_start_write_no_snapshoting(root);
1348                                 if (!err)
1349                                         goto out_check;
1350                         }
1351                         /*
1352                          * Force COW if a csum exists in the range.
1353                          * This ensures that the csums for a given extent
1354                          * are either valid or do not exist.
1355                          */
1356                         if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1357                                 goto out_check;
1358                         nocow = 1;
1359                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1360                         extent_end = found_key.offset +
1361                                 btrfs_file_extent_inline_len(leaf,
1362                                                      path->slots[0], fi);
1363                         extent_end = ALIGN(extent_end, root->sectorsize);
1364                 } else {
1365                         BUG_ON(1);
1366                 }
1367 out_check:
1368                 if (extent_end <= start) {
1369                         path->slots[0]++;
1370                         if (!nolock && nocow)
1371                                 btrfs_end_write_no_snapshoting(root);
1372                         goto next_slot;
1373                 }
1374                 if (!nocow) {
1375                         if (cow_start == (u64)-1)
1376                                 cow_start = cur_offset;
1377                         cur_offset = extent_end;
1378                         if (cur_offset > end)
1379                                 break;
1380                         path->slots[0]++;
1381                         goto next_slot;
1382                 }
1383
1384                 btrfs_release_path(path);
1385                 if (cow_start != (u64)-1) {
1386                         ret = cow_file_range(inode, locked_page,
1387                                              cow_start, found_key.offset - 1,
1388                                              page_started, nr_written, 1);
1389                         if (ret) {
1390                                 if (!nolock && nocow)
1391                                         btrfs_end_write_no_snapshoting(root);
1392                                 goto error;
1393                         }
1394                         cow_start = (u64)-1;
1395                 }
1396
1397                 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1398                         struct extent_map *em;
1399                         struct extent_map_tree *em_tree;
1400                         em_tree = &BTRFS_I(inode)->extent_tree;
1401                         em = alloc_extent_map();
1402                         BUG_ON(!em); /* -ENOMEM */
1403                         em->start = cur_offset;
1404                         em->orig_start = found_key.offset - extent_offset;
1405                         em->len = num_bytes;
1406                         em->block_len = num_bytes;
1407                         em->block_start = disk_bytenr;
1408                         em->orig_block_len = disk_num_bytes;
1409                         em->ram_bytes = ram_bytes;
1410                         em->bdev = root->fs_info->fs_devices->latest_bdev;
1411                         em->mod_start = em->start;
1412                         em->mod_len = em->len;
1413                         set_bit(EXTENT_FLAG_PINNED, &em->flags);
1414                         set_bit(EXTENT_FLAG_FILLING, &em->flags);
1415                         em->generation = -1;
1416                         while (1) {
1417                                 write_lock(&em_tree->lock);
1418                                 ret = add_extent_mapping(em_tree, em, 1);
1419                                 write_unlock(&em_tree->lock);
1420                                 if (ret != -EEXIST) {
1421                                         free_extent_map(em);
1422                                         break;
1423                                 }
1424                                 btrfs_drop_extent_cache(inode, em->start,
1425                                                 em->start + em->len - 1, 0);
1426                         }
1427                         type = BTRFS_ORDERED_PREALLOC;
1428                 } else {
1429                         type = BTRFS_ORDERED_NOCOW;
1430                 }
1431
1432                 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1433                                                num_bytes, num_bytes, type);
1434                 BUG_ON(ret); /* -ENOMEM */
1435
1436                 if (root->root_key.objectid ==
1437                     BTRFS_DATA_RELOC_TREE_OBJECTID) {
1438                         ret = btrfs_reloc_clone_csums(inode, cur_offset,
1439                                                       num_bytes);
1440                         if (ret) {
1441                                 if (!nolock && nocow)
1442                                         btrfs_end_write_no_snapshoting(root);
1443                                 goto error;
1444                         }
1445                 }
1446
1447                 extent_clear_unlock_delalloc(inode, cur_offset,
1448                                              cur_offset + num_bytes - 1,
1449                                              locked_page, EXTENT_LOCKED |
1450                                              EXTENT_DELALLOC, PAGE_UNLOCK |
1451                                              PAGE_SET_PRIVATE2);
1452                 if (!nolock && nocow)
1453                         btrfs_end_write_no_snapshoting(root);
1454                 cur_offset = extent_end;
1455                 if (cur_offset > end)
1456                         break;
1457         }
1458         btrfs_release_path(path);
1459
1460         if (cur_offset <= end && cow_start == (u64)-1) {
1461                 cow_start = cur_offset;
1462                 cur_offset = end;
1463         }
1464
1465         if (cow_start != (u64)-1) {
1466                 ret = cow_file_range(inode, locked_page, cow_start, end,
1467                                      page_started, nr_written, 1);
1468                 if (ret)
1469                         goto error;
1470         }
1471
1472 error:
1473         err = btrfs_end_transaction(trans, root);
1474         if (!ret)
1475                 ret = err;
1476
1477         if (ret && cur_offset < end)
1478                 extent_clear_unlock_delalloc(inode, cur_offset, end,
1479                                              locked_page, EXTENT_LOCKED |
1480                                              EXTENT_DELALLOC | EXTENT_DEFRAG |
1481                                              EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
1482                                              PAGE_CLEAR_DIRTY |
1483                                              PAGE_SET_WRITEBACK |
1484                                              PAGE_END_WRITEBACK);
1485         btrfs_free_path(path);
1486         return ret;
1487 }
1488
1489 static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
1490 {
1491
1492         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
1493             !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
1494                 return 0;
1495
1496         /*
1497          * @defrag_bytes is a hint value; no spinlock is held here.
1498          * If it is non-zero, the file is being defragged.
1499          * Force COW if the given extent needs to be defragged.
1500          */
1501         if (BTRFS_I(inode)->defrag_bytes &&
1502             test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
1503                            EXTENT_DEFRAG, 0, NULL))
1504                 return 1;
1505
1506         return 0;
1507 }
1508
1509 /*
1510  * extent_io.c callback to do delayed allocation processing
1511  */
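     /*
      * Roughly: NODATACOW inodes go to run_delalloc_nocow() with force=1, so
      * even regular extents may be overwritten in place; PREALLOC inodes use
      * force=0, so only preallocated extents are written in place; everything
      * else is plain COW via cow_file_range(), or the async
      * (compression-capable) path when inode_need_compress() asks for it.
      */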
1512 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1513                               u64 start, u64 end, int *page_started,
1514                               unsigned long *nr_written)
1515 {
1516         int ret;
1517         int force_cow = need_force_cow(inode, start, end);
1518
1519         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
1520                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1521                                          page_started, 1, nr_written);
1522         } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
1523                 ret = run_delalloc_nocow(inode, locked_page, start, end,
1524                                          page_started, 0, nr_written);
1525         } else if (!inode_need_compress(inode)) {
1526                 ret = cow_file_range(inode, locked_page, start, end,
1527                                       page_started, nr_written, 1);
1528         } else {
1529                 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1530                         &BTRFS_I(inode)->runtime_flags);
1531                 ret = cow_file_range_async(inode, locked_page, start, end,
1532                                            page_started, nr_written);
1533         }
1534         return ret;
1535 }
1536
1537 static void btrfs_split_extent_hook(struct inode *inode,
1538                                     struct extent_state *orig, u64 split)
1539 {
1540         u64 size;
1541
1542         /* not delalloc, ignore it */
1543         if (!(orig->state & EXTENT_DELALLOC))
1544                 return;
1545
1546         size = orig->end - orig->start + 1;
1547         if (size > BTRFS_MAX_EXTENT_SIZE) {
1548                 u64 num_extents;
1549                 u64 new_size;
1550
1551                 /*
1552                  * See the explanation in btrfs_merge_extent_hook, the same
1553                  * applies here, just in reverse.
1554                  */
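                     /*
                      * For example, with MAX == BTRFS_MAX_EXTENT_SIZE:
                      * splitting a MAX + 8k extent at offset 4k leaves a 4k
                      * piece (1 extent) and a MAX + 4k piece (2 extents),
                      * 3 in total, while the original range only accounted
                      * for ceil((MAX + 8k) / MAX) = 2, so we fall through
                      * and add one outstanding extent below.
                      */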
1555                 new_size = orig->end - split + 1;
1556                 num_extents = div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1557                                         BTRFS_MAX_EXTENT_SIZE);
1558                 new_size = split - orig->start;
1559                 num_extents += div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1560                                         BTRFS_MAX_EXTENT_SIZE);
1561                 if (div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
1562                               BTRFS_MAX_EXTENT_SIZE) >= num_extents)
1563                         return;
1564         }
1565
1566         spin_lock(&BTRFS_I(inode)->lock);
1567         BTRFS_I(inode)->outstanding_extents++;
1568         spin_unlock(&BTRFS_I(inode)->lock);
1569 }
1570
1571 /*
1572  * extent_io.c merge_extent_hook, used to track merged delayed allocation
1573  * extents, such as when new extents are merged onto old ones during
1574  * sequential writes, so we can properly account for the metadata space
1575  * we'll need.
1576  */
1577 static void btrfs_merge_extent_hook(struct inode *inode,
1578                                     struct extent_state *new,
1579                                     struct extent_state *other)
1580 {
1581         u64 new_size, old_size;
1582         u64 num_extents;
1583
1584         /* not delalloc, ignore it */
1585         if (!(other->state & EXTENT_DELALLOC))
1586                 return;
1587
1588         if (new->start > other->start)
1589                 new_size = new->end - other->start + 1;
1590         else
1591                 new_size = other->end - new->start + 1;
1592
1593         /* we're not bigger than the max, unreserve the space and go */
1594         if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
1595                 spin_lock(&BTRFS_I(inode)->lock);
1596                 BTRFS_I(inode)->outstanding_extents--;
1597                 spin_unlock(&BTRFS_I(inode)->lock);
1598                 return;
1599         }
1600
1601         /*
1602          * We have to add up either side to figure out how many extents were
1603          * accounted for before we merged into one big extent.  If the number of
1604          * extents we accounted for is <= the amount we need for the new range
1605          * then we can return, otherwise drop.  Think of it like this
1606          *
1607          * [ 4k][MAX_SIZE]
1608          *
1609          * So we've grown the extent by a MAX_SIZE extent, this would mean we
1610          * need 2 outstanding extents, on one side we have 1 and the other side
1611          * we have 1 so they are == and we can return.  But in this case
1612          *
1613          * [MAX_SIZE+4k][MAX_SIZE+4k]
1614          *
1615          * Each range on their own accounts for 2 extents, but merged together
1616          * they are only 3 extents worth of accounting, so we need to drop in
1617          * this case.
1618          */
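             /*
              * Spelled out with MAX == BTRFS_MAX_EXTENT_SIZE: for
              * [MAX+4k][MAX+4k] the two sides account for 2 + 2 = 4 extents,
              * but the merged MAX*2+8k range only needs ceil((MAX*2+8k)/MAX)
              * = 3, so we drop one below.  For [4k][MAX] the sides account
              * for 1 + 1 = 2 and the merged range still needs 2, so the >=
              * check below just returns.
              */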
1619         old_size = other->end - other->start + 1;
1620         num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1621                                 BTRFS_MAX_EXTENT_SIZE);
1622         old_size = new->end - new->start + 1;
1623         num_extents += div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
1624                                  BTRFS_MAX_EXTENT_SIZE);
1625
1626         if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
1627                       BTRFS_MAX_EXTENT_SIZE) >= num_extents)
1628                 return;
1629
1630         spin_lock(&BTRFS_I(inode)->lock);
1631         BTRFS_I(inode)->outstanding_extents--;
1632         spin_unlock(&BTRFS_I(inode)->lock);
1633 }
1634
1635 static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
1636                                       struct inode *inode)
1637 {
1638         spin_lock(&root->delalloc_lock);
1639         if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1640                 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1641                               &root->delalloc_inodes);
1642                 set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1643                         &BTRFS_I(inode)->runtime_flags);
1644                 root->nr_delalloc_inodes++;
1645                 if (root->nr_delalloc_inodes == 1) {
1646                         spin_lock(&root->fs_info->delalloc_root_lock);
1647                         BUG_ON(!list_empty(&root->delalloc_root));
1648                         list_add_tail(&root->delalloc_root,
1649                                       &root->fs_info->delalloc_roots);
1650                         spin_unlock(&root->fs_info->delalloc_root_lock);
1651                 }
1652         }
1653         spin_unlock(&root->delalloc_lock);
1654 }
1655
1656 static void btrfs_del_delalloc_inode(struct btrfs_root *root,
1657                                      struct inode *inode)
1658 {
1659         spin_lock(&root->delalloc_lock);
1660         if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1661                 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1662                 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1663                           &BTRFS_I(inode)->runtime_flags);
1664                 root->nr_delalloc_inodes--;
1665                 if (!root->nr_delalloc_inodes) {
1666                         spin_lock(&root->fs_info->delalloc_root_lock);
1667                         BUG_ON(list_empty(&root->delalloc_root));
1668                         list_del_init(&root->delalloc_root);
1669                         spin_unlock(&root->fs_info->delalloc_root_lock);
1670                 }
1671         }
1672         spin_unlock(&root->delalloc_lock);
1673 }
1674
1675 /*
1676  * extent_io.c set_bit_hook, used to track delayed allocation
1677  * bytes in this file, and to maintain the list of inodes that
1678  * have pending delalloc work to be done.
1679  */
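     /*
      * EXTENT_FIRST_DELALLOC marks the first delalloc set over a range; in
      * that case the hooks below skip adjusting outstanding_extents,
      * presumably because the reservation path has already accounted for
      * that first extent.
      */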
1680 static void btrfs_set_bit_hook(struct inode *inode,
1681                                struct extent_state *state, unsigned *bits)
1682 {
1683
1684         if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
1685                 WARN_ON(1);
1686         /*
1687          * set_bit and clear bit hooks normally require _irqsave/restore
1688          * but in this case, we are only testing for the DELALLOC
1689          * bit, which is only set or cleared with irqs on
1690          */
1691         if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1692                 struct btrfs_root *root = BTRFS_I(inode)->root;
1693                 u64 len = state->end + 1 - state->start;
1694                 bool do_list = !btrfs_is_free_space_inode(inode);
1695
1696                 if (*bits & EXTENT_FIRST_DELALLOC) {
1697                         *bits &= ~EXTENT_FIRST_DELALLOC;
1698                 } else {
1699                         spin_lock(&BTRFS_I(inode)->lock);
1700                         BTRFS_I(inode)->outstanding_extents++;
1701                         spin_unlock(&BTRFS_I(inode)->lock);
1702                 }
1703
1704                 /* For sanity tests */
1705                 if (btrfs_test_is_dummy_root(root))
1706                         return;
1707
1708                 __percpu_counter_add(&root->fs_info->delalloc_bytes, len,
1709                                      root->fs_info->delalloc_batch);
1710                 spin_lock(&BTRFS_I(inode)->lock);
1711                 BTRFS_I(inode)->delalloc_bytes += len;
1712                 if (*bits & EXTENT_DEFRAG)
1713                         BTRFS_I(inode)->defrag_bytes += len;
1714                 if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1715                                          &BTRFS_I(inode)->runtime_flags))
1716                         btrfs_add_delalloc_inodes(root, inode);
1717                 spin_unlock(&BTRFS_I(inode)->lock);
1718         }
1719 }
1720
1721 /*
1722  * extent_io.c clear_bit_hook, see set_bit_hook for why
1723  */
1724 static void btrfs_clear_bit_hook(struct inode *inode,
1725                                  struct extent_state *state,
1726                                  unsigned *bits)
1727 {
1728         u64 len = state->end + 1 - state->start;
1729         u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE -1,
1730                                     BTRFS_MAX_EXTENT_SIZE);
1731
1732         spin_lock(&BTRFS_I(inode)->lock);
1733         if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG))
1734                 BTRFS_I(inode)->defrag_bytes -= len;
1735         spin_unlock(&BTRFS_I(inode)->lock);
1736
1737         /*
1738          * set_bit and clear bit hooks normally require _irqsave/restore
1739          * but in this case, we are only testing for the DELALLOC
1740          * bit, which is only set or cleared with irqs on
1741          */
1742         if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
1743                 struct btrfs_root *root = BTRFS_I(inode)->root;
1744                 bool do_list = !btrfs_is_free_space_inode(inode);
1745
1746                 if (*bits & EXTENT_FIRST_DELALLOC) {
1747                         *bits &= ~EXTENT_FIRST_DELALLOC;
1748                 } else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
1749                         spin_lock(&BTRFS_I(inode)->lock);
1750                         BTRFS_I(inode)->outstanding_extents -= num_extents;
1751                         spin_unlock(&BTRFS_I(inode)->lock);
1752                 }
1753
1754                 /*
1755                  * We don't reserve metadata space for space cache inodes so we
1756                  * don't need to call btrfs_delalloc_release_metadata if
1757                  * there is an error.
1758                  */
1759                 if (*bits & EXTENT_DO_ACCOUNTING &&
1760                     root != root->fs_info->tree_root)
1761                         btrfs_delalloc_release_metadata(inode, len);
1762
1763                 /* For sanity tests. */
1764                 if (btrfs_test_is_dummy_root(root))
1765                         return;
1766
1767                 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
1768                     && do_list && !(state->state & EXTENT_NORESERVE))
1769                         btrfs_free_reserved_data_space(inode, len);
1770
1771                 __percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
1772                                      root->fs_info->delalloc_batch);
1773                 spin_lock(&BTRFS_I(inode)->lock);
1774                 BTRFS_I(inode)->delalloc_bytes -= len;
1775                 if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
1776                     test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
1777                              &BTRFS_I(inode)->runtime_flags))
1778                         btrfs_del_delalloc_inode(root, inode);
1779                 spin_unlock(&BTRFS_I(inode)->lock);
1780         }
1781 }
1782
1783 /*
1784  * extent_io.c merge_bio_hook: this must check the chunk tree to make sure
1785  * we don't create bios that span stripes or chunks
1786  */
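     /*
      * btrfs_map_block() reports how far the contiguous mapping extends from
      * the bio's starting logical address; if adding this page would run past
      * it (map_length < length + size), return 1 so the caller starts a new
      * bio instead of merging.
      */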
1787 int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
1788                          size_t size, struct bio *bio,
1789                          unsigned long bio_flags)
1790 {
1791         struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1792         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1793         u64 length = 0;
1794         u64 map_length;
1795         int ret;
1796
1797         if (bio_flags & EXTENT_BIO_COMPRESSED)
1798                 return 0;
1799
1800         length = bio->bi_iter.bi_size;
1801         map_length = length;
1802         ret = btrfs_map_block(root->fs_info, rw, logical,
1803                               &map_length, NULL, 0);
1804         /* Will always return 0 with map_multi == NULL */
1805         BUG_ON(ret < 0);
1806         if (map_length < length + size)
1807                 return 1;
1808         return 0;
1809 }
1810
1811 /*
1812  * in order to insert checksums into the metadata in large chunks,
1813  * we wait until bio submission time.   All the pages in the bio are
1814  * checksummed and sums are attached onto the ordered extent record.
1815  *
1816  * At IO completion time the csums attached to the ordered extent record
1817  * are inserted into the btree.
1818  */
1819 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1820                                     struct bio *bio, int mirror_num,
1821                                     unsigned long bio_flags,
1822                                     u64 bio_offset)
1823 {
1824         struct btrfs_root *root = BTRFS_I(inode)->root;
1825         int ret = 0;
1826
1827         ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1828         BUG_ON(ret); /* -ENOMEM */
1829         return 0;
1830 }
1831
1832 /*
1833  * in order to insert checksums into the metadata in large chunks,
1834  * we wait until bio submission time.   All the pages in the bio are
1835  * checksummed and sums are attached onto the ordered extent record.
1836  *
1837  * At IO completion time the csums attached to the ordered extent record
1838  * are inserted into the btree.
1839  */
1840 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1841                           int mirror_num, unsigned long bio_flags,
1842                           u64 bio_offset)
1843 {
1844         struct btrfs_root *root = BTRFS_I(inode)->root;
1845         int ret;
1846
1847         ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
1848         if (ret)
1849                 bio_endio(bio, ret);
1850         return ret;
1851 }
1852
1853 /*
1854  * extent_io.c submission hook. This does the right thing for csum calculation
1855  * on write, or reading the csums from the tree before a read
1856  */
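     /*
      * In short: reads are handed to the end_io workqueue and either take the
      * compressed-read path or have their csums looked up before being
      * mapped; writes normally go through btrfs_wq_submit_bio() so the
      * checksumming in __btrfs_submit_bio_start() runs asynchronously, and
      * otherwise checksum inline here before mapping.
      */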
1857 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1858                           int mirror_num, unsigned long bio_flags,
1859                           u64 bio_offset)
1860 {
1861         struct btrfs_root *root = BTRFS_I(inode)->root;
1862         int ret = 0;
1863         int skip_sum;
1864         int metadata = 0;
1865         int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
1866
1867         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1868
1869         if (btrfs_is_free_space_inode(inode))
1870                 metadata = 2;
1871
1872         if (!(rw & REQ_WRITE)) {
1873                 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
1874                 if (ret)
1875                         goto out;
1876
1877                 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1878                         ret = btrfs_submit_compressed_read(inode, bio,
1879                                                            mirror_num,
1880                                                            bio_flags);
1881                         goto out;
1882                 } else if (!skip_sum) {
1883                         ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
1884                         if (ret)
1885                                 goto out;
1886                 }
1887                 goto mapit;
1888         } else if (async && !skip_sum) {
1889                 /* csum items have already been cloned */
1890                 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1891                         goto mapit;
1892                 /* we're doing a write, do the async checksumming */
1893                 ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1894                                    inode, rw, bio, mirror_num,
1895                                    bio_flags, bio_offset,
1896                                    __btrfs_submit_bio_start,
1897                                    __btrfs_submit_bio_done);
1898                 goto out;
1899         } else if (!skip_sum) {
1900                 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1901                 if (ret)
1902                         goto out;
1903         }
1904
1905 mapit:
1906         ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
1907
1908 out:
1909         if (ret < 0)
1910                 bio_endio(bio, ret);
1911         return ret;
1912 }
1913
1914 /*
1915  * Given a list of ordered sums, record them in the inode.  This happens
1916  * at IO completion time based on sums calculated at bio submission time.
1917  */
1918 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1919                              struct inode *inode, u64 file_offset,
1920                              struct list_head *list)
1921 {
1922         struct btrfs_ordered_sum *sum;
1923
1924         list_for_each_entry(sum, list, list) {
1925                 trans->adding_csums = 1;
1926                 btrfs_csum_file_blocks(trans,
1927                        BTRFS_I(inode)->root->fs_info->csum_root, sum);
1928                 trans->adding_csums = 0;
1929         }
1930         return 0;
1931 }
1932
1933 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
1934                               struct extent_state **cached_state)
1935 {
1936         WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0);
1937         return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1938                                    cached_state, GFP_NOFS);
1939 }
1940
1941 /* see btrfs_writepage_start_hook for details on why this is required */
1942 struct btrfs_writepage_fixup {
1943         struct page *page;
1944         struct btrfs_work work;
1945 };
1946
1947 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1948 {
1949         struct btrfs_writepage_fixup *fixup;
1950         struct btrfs_ordered_extent *ordered;
1951         struct extent_state *cached_state = NULL;
1952         struct page *page;
1953         struct inode *inode;
1954         u64 page_start;
1955         u64 page_end;
1956         int ret;
1957
1958         fixup = container_of(work, struct btrfs_writepage_fixup, work);
1959         page = fixup->page;
1960 again:
1961         lock_page(page);
1962         if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1963                 ClearPageChecked(page);
1964                 goto out_page;
1965         }
1966
1967         inode = page->mapping->host;
1968         page_start = page_offset(page);
1969         page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1970
1971         lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, 0,
1972                          &cached_state);
1973
1974         /* already ordered? We're done */
1975         if (PagePrivate2(page))
1976                 goto out;
1977
1978         ordered = btrfs_lookup_ordered_extent(inode, page_start);
1979         if (ordered) {
1980                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
1981                                      page_end, &cached_state, GFP_NOFS);
1982                 unlock_page(page);
1983                 btrfs_start_ordered_extent(inode, ordered, 1);
1984                 btrfs_put_ordered_extent(ordered);
1985                 goto again;
1986         }
1987
1988         ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
1989         if (ret) {
1990                 mapping_set_error(page->mapping, ret);
1991                 end_extent_writepage(page, ret, page_start, page_end);
1992                 ClearPageChecked(page);
1993                 goto out;
1994         }
1995
1996         btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
1997         ClearPageChecked(page);
1998         set_page_dirty(page);
1999 out:
2000         unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
2001                              &cached_state, GFP_NOFS);
2002 out_page:
2003         unlock_page(page);
2004         page_cache_release(page);
2005         kfree(fixup);
2006 }
2007
2008 /*
2009  * There are a few paths in the higher layers of the kernel that directly
2010  * set the page dirty bit without asking the filesystem if it is a
2011  * good idea.  This causes problems because we want to make sure COW
2012  * properly happens and the data=ordered rules are followed.
2013  *
2014  * In our case any range that doesn't have the ORDERED bit set
2015  * hasn't been properly setup for IO.  We kick off an async process
2016  * to fix it up.  The async helper will wait for ordered extents, set
2017  * the delalloc bit and make it safe to write the page.
2018  */
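     /*
      * Return values below: 0 when the page is already covered by an ordered
      * extent (Private2 was set), -EAGAIN when a fixup is already pending or
      * could not be allocated, and -EBUSY once the fixup worker has been
      * queued and holds a reference on the page.
      */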
2019 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
2020 {
2021         struct inode *inode = page->mapping->host;
2022         struct btrfs_writepage_fixup *fixup;
2023         struct btrfs_root *root = BTRFS_I(inode)->root;
2024
2025         /* this page is properly in the ordered list */
2026         if (TestClearPagePrivate2(page))
2027                 return 0;
2028
2029         if (PageChecked(page))
2030                 return -EAGAIN;
2031
2032         fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
2033         if (!fixup)
2034                 return -EAGAIN;
2035
2036         SetPageChecked(page);
2037         page_cache_get(page);
2038         btrfs_init_work(&fixup->work, btrfs_fixup_helper,
2039                         btrfs_writepage_fixup_worker, NULL, NULL);
2040         fixup->page = page;
2041         btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work);
2042         return -EBUSY;
2043 }
2044
2045 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
2046                                        struct inode *inode, u64 file_pos,
2047                                        u64 disk_bytenr, u64 disk_num_bytes,
2048                                        u64 num_bytes, u64 ram_bytes,
2049                                        u8 compression, u8 encryption,
2050                                        u16 other_encoding, int extent_type)
2051 {
2052         struct btrfs_root *root = BTRFS_I(inode)->root;
2053         struct btrfs_file_extent_item *fi;
2054         struct btrfs_path *path;
2055         struct extent_buffer *leaf;
2056         struct btrfs_key ins;
2057         int extent_inserted = 0;
2058         int ret;
2059
2060         path = btrfs_alloc_path();
2061         if (!path)
2062                 return -ENOMEM;
2063
2064         /*
2065          * we may be replacing one extent in the tree with another.
2066          * The new extent is pinned in the extent map, and we don't want
2067          * to drop it from the cache until it is completely in the btree.
2068          *
2069          * So, tell btrfs_drop_extents to leave this extent in the cache.
2070          * the caller is expected to unpin it and allow it to be merged
2071          * The caller is expected to unpin it and allow it to be merged
2072          */
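             /*
              * __btrfs_drop_extents() below removes whatever file extent
              * items currently cover [file_pos, file_pos + num_bytes) and,
              * when it can, already makes room for the replacement item;
              * extent_inserted tells us whether we still need to call
              * btrfs_insert_empty_item() ourselves.
              */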
2073         ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
2074                                    file_pos + num_bytes, NULL, 0,
2075                                    1, sizeof(*fi), &extent_inserted);
2076         if (ret)
2077                 goto out;
2078
2079         if (!extent_inserted) {
2080                 ins.objectid = btrfs_ino(inode);
2081                 ins.offset = file_pos;
2082                 ins.type = BTRFS_EXTENT_DATA_KEY;
2083
2084                 path->leave_spinning = 1;
2085                 ret = btrfs_insert_empty_item(trans, root, path, &ins,
2086                                               sizeof(*fi));
2087                 if (ret)
2088                         goto out;
2089         }
2090         leaf = path->nodes[0];
2091         fi = btrfs_item_ptr(leaf, path->slots[0],
2092                             struct btrfs_file_extent_item);
2093         btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2094         btrfs_set_file_extent_type(leaf, fi, extent_type);
2095         btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
2096         btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
2097         btrfs_set_file_extent_offset(leaf, fi, 0);
2098         btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2099         btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
2100         btrfs_set_file_extent_compression(leaf, fi, compression);
2101         btrfs_set_file_extent_encryption(leaf, fi, encryption);
2102         btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
2103
2104         btrfs_mark_buffer_dirty(leaf);
2105         btrfs_release_path(path);
2106
2107         inode_add_bytes(inode, num_bytes);
2108
2109         ins.objectid = disk_bytenr;
2110         ins.offset = disk_num_bytes;
2111         ins.type = BTRFS_EXTENT_ITEM_KEY;
2112         ret = btrfs_alloc_reserved_file_extent(trans, root,
2113                                         root->root_key.objectid,
2114                                         btrfs_ino(inode), file_pos, &ins);
2115 out:
2116         btrfs_free_path(path);
2117
2118         return ret;
2119 }
2120
2121 /* snapshot-aware defrag */
2122 struct sa_defrag_extent_backref {
2123         struct rb_node node;
2124         struct old_sa_defrag_extent *old;
2125         u64 root_id;
2126         u64 inum;
2127         u64 file_pos;
2128         u64 extent_offset;
2129         u64 num_bytes;
2130         u64 generation;
2131 };
2132
2133 struct old_sa_defrag_extent {
2134         struct list_head list;
2135         struct new_sa_defrag_extent *new;
2136
2137         u64 extent_offset;
2138         u64 bytenr;
2139         u64 offset;
2140         u64 len;
2141         int count;
2142 };
2143
2144 struct new_sa_defrag_extent {
2145         struct rb_root root;
2146         struct list_head head;
2147         struct btrfs_path *path;
2148         struct inode *inode;
2149         u64 file_pos;
2150         u64 len;
2151         u64 bytenr;
2152         u64 disk_len;
2153         u8 compress_type;
2154 };
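     /*
      * How the three structs above relate: a new_sa_defrag_extent describes
      * the freshly written extent taken from the ordered extent; its 'head'
      * lists the old extents it replaced, and every backref found for those
      * old extents is inserted into 'root', sorted by
      * (root_id, inum, file_pos), so relink_file_extents() can walk and
      * relink them in order.
      */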
2155
2156 static int backref_comp(struct sa_defrag_extent_backref *b1,
2157                         struct sa_defrag_extent_backref *b2)
2158 {
2159         if (b1->root_id < b2->root_id)
2160                 return -1;
2161         else if (b1->root_id > b2->root_id)
2162                 return 1;
2163
2164         if (b1->inum < b2->inum)
2165                 return -1;
2166         else if (b1->inum > b2->inum)
2167                 return 1;
2168
2169         if (b1->file_pos < b2->file_pos)
2170                 return -1;
2171         else if (b1->file_pos > b2->file_pos)
2172                 return 1;
2173
2174         /*
2175          * [------------------------------] ===> (a range of space)
2176          *     |<--->|   |<---->| =============> (fs/file tree A)
2177          * |<---------------------------->| ===> (fs/file tree B)
2178          *
2179          * A range of space can refer to two file extents in one tree while
2180          * referring to only one file extent in another tree.
2181          *
2182          * So we may process a disk offset more than once (two extents in A)
2183          * and land on the same extent (one extent in B), then insert two
2184          * identical backrefs (both referring to the extent in B).
2185          */
2186         return 0;
2187 }
2188
2189 static void backref_insert(struct rb_root *root,
2190                            struct sa_defrag_extent_backref *backref)
2191 {
2192         struct rb_node **p = &root->rb_node;
2193         struct rb_node *parent = NULL;
2194         struct sa_defrag_extent_backref *entry;
2195         int ret;
2196
2197         while (*p) {
2198                 parent = *p;
2199                 entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
2200
2201                 ret = backref_comp(backref, entry);
2202                 if (ret < 0)
2203                         p = &(*p)->rb_left;
2204                 else
2205                         p = &(*p)->rb_right;
2206         }
2207
2208         rb_link_node(&backref->node, parent, p);
2209         rb_insert_color(&backref->node, root);
2210 }
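     /*
      * Note that backref_comp() returning 0 (equal keys) sends the new node
      * to the right subtree, so the duplicate backrefs described above are
      * kept rather than rejected.
      */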
2211
2212 /*
2213  * Note the backref might have changed; in that case we just return 0.
2214  */
2215 static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
2216                                        void *ctx)
2217 {
2218         struct btrfs_file_extent_item *extent;
2219         struct btrfs_fs_info *fs_info;
2220         struct old_sa_defrag_extent *old = ctx;
2221         struct new_sa_defrag_extent *new = old->new;
2222         struct btrfs_path *path = new->path;
2223         struct btrfs_key key;
2224         struct btrfs_root *root;
2225         struct sa_defrag_extent_backref *backref;
2226         struct extent_buffer *leaf;
2227         struct inode *inode = new->inode;
2228         int slot;
2229         int ret;
2230         u64 extent_offset;
2231         u64 num_bytes;
2232
2233         if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
2234             inum == btrfs_ino(inode))
2235                 return 0;
2236
2237         key.objectid = root_id;
2238         key.type = BTRFS_ROOT_ITEM_KEY;
2239         key.offset = (u64)-1;
2240
2241         fs_info = BTRFS_I(inode)->root->fs_info;
2242         root = btrfs_read_fs_root_no_name(fs_info, &key);
2243         if (IS_ERR(root)) {
2244                 if (PTR_ERR(root) == -ENOENT)
2245                         return 0;
2246                 WARN_ON(1);
2247                 pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
2248                          inum, offset, root_id);
2249                 return PTR_ERR(root);
2250         }
2251
2252         key.objectid = inum;
2253         key.type = BTRFS_EXTENT_DATA_KEY;
2254         if (offset > (u64)-1 << 32)
2255                 key.offset = 0;
2256         else
2257                 key.offset = offset;
2258
2259         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2260         if (WARN_ON(ret < 0))
2261                 return ret;
2262         ret = 0;
2263
2264         while (1) {
2265                 cond_resched();
2266
2267                 leaf = path->nodes[0];
2268                 slot = path->slots[0];
2269
2270                 if (slot >= btrfs_header_nritems(leaf)) {
2271                         ret = btrfs_next_leaf(root, path);
2272                         if (ret < 0) {
2273                                 goto out;
2274                         } else if (ret > 0) {
2275                                 ret = 0;
2276                                 goto out;
2277                         }
2278                         continue;
2279                 }
2280
2281                 path->slots[0]++;
2282
2283                 btrfs_item_key_to_cpu(leaf, &key, slot);
2284
2285                 if (key.objectid > inum)
2286                         goto out;
2287
2288                 if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
2289                         continue;
2290
2291                 extent = btrfs_item_ptr(leaf, slot,
2292                                         struct btrfs_file_extent_item);
2293
2294                 if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
2295                         continue;
2296
2297                 /*
2298                  * 'offset' refers to the exact key.offset,
2299                  * NOT the 'offset' field in btrfs_extent_data_ref, i.e.
2300                  * (key.offset - extent_offset).
2301                  */
2302                 if (key.offset != offset)
2303                         continue;
2304
2305                 extent_offset = btrfs_file_extent_offset(leaf, extent);
2306                 num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
2307
2308                 if (extent_offset >= old->extent_offset + old->offset +
2309                     old->len || extent_offset + num_bytes <=
2310                     old->extent_offset + old->offset)
2311                         continue;
2312                 break;
2313         }
2314
2315         backref = kmalloc(sizeof(*backref), GFP_NOFS);
2316         if (!backref) {
2317                 ret = -ENOENT;
2318                 goto out;
2319         }
2320
2321         backref->root_id = root_id;
2322         backref->inum = inum;
2323         backref->file_pos = offset;
2324         backref->num_bytes = num_bytes;
2325         backref->extent_offset = extent_offset;
2326         backref->generation = btrfs_file_extent_generation(leaf, extent);
2327         backref->old = old;
2328         backref_insert(&new->root, backref);
2329         old->count++;
2330 out:
2331         btrfs_release_path(path);
2332         WARN_ON(ret);
2333         return ret;
2334 }
2335
2336 static noinline bool record_extent_backrefs(struct btrfs_path *path,
2337                                    struct new_sa_defrag_extent *new)
2338 {
2339         struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
2340         struct old_sa_defrag_extent *old, *tmp;
2341         int ret;
2342
2343         new->path = path;
2344
2345         list_for_each_entry_safe(old, tmp, &new->head, list) {
2346                 ret = iterate_inodes_from_logical(old->bytenr +
2347                                                   old->extent_offset, fs_info,
2348                                                   path, record_one_backref,
2349                                                   old);
2350                 if (ret < 0 && ret != -ENOENT)
2351                         return false;
2352
2353                 /* no backref to be processed for this extent */
2354                 if (!old->count) {
2355                         list_del(&old->list);
2356                         kfree(old);
2357                 }
2358         }
2359
2360         if (list_empty(&new->head))
2361                 return false;
2362
2363         return true;
2364 }
2365
2366 static int relink_is_mergable(struct extent_buffer *leaf,
2367                               struct btrfs_file_extent_item *fi,
2368                               struct new_sa_defrag_extent *new)
2369 {
2370         if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
2371                 return 0;
2372
2373         if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2374                 return 0;
2375
2376         if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
2377                 return 0;
2378
2379         if (btrfs_file_extent_encryption(leaf, fi) ||
2380             btrfs_file_extent_other_encoding(leaf, fi))
2381                 return 0;
2382
2383         return 1;
2384 }
2385
2386 /*
2387  * Note the backref might have changed; in that case we just return 0.
2388  */
2389 static noinline int relink_extent_backref(struct btrfs_path *path,
2390                                  struct sa_defrag_extent_backref *prev,
2391                                  struct sa_defrag_extent_backref *backref)
2392 {
2393         struct btrfs_file_extent_item *extent;
2394         struct btrfs_file_extent_item *item;
2395         struct btrfs_ordered_extent *ordered;
2396         struct btrfs_trans_handle *trans;
2397         struct btrfs_fs_info *fs_info;
2398         struct btrfs_root *root;
2399         struct btrfs_key key;
2400         struct extent_buffer *leaf;
2401         struct old_sa_defrag_extent *old = backref->old;
2402         struct new_sa_defrag_extent *new = old->new;
2403         struct inode *src_inode = new->inode;
2404         struct inode *inode;
2405         struct extent_state *cached = NULL;
2406         int ret = 0;
2407         u64 start;
2408         u64 len;
2409         u64 lock_start;
2410         u64 lock_end;
2411         bool merge = false;
2412         int index;
2413
2414         if (prev && prev->root_id == backref->root_id &&
2415             prev->inum == backref->inum &&
2416             prev->file_pos + prev->num_bytes == backref->file_pos)
2417                 merge = true;
2418
2419         /* step 1: get root */
2420         key.objectid = backref->root_id;
2421         key.type = BTRFS_ROOT_ITEM_KEY;
2422         key.offset = (u64)-1;
2423
2424         fs_info = BTRFS_I(src_inode)->root->fs_info;
2425         index = srcu_read_lock(&fs_info->subvol_srcu);
2426
2427         root = btrfs_read_fs_root_no_name(fs_info, &key);
2428         if (IS_ERR(root)) {
2429                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2430                 if (PTR_ERR(root) == -ENOENT)
2431                         return 0;
2432                 return PTR_ERR(root);
2433         }
2434
2435         if (btrfs_root_readonly(root)) {
2436                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2437                 return 0;
2438         }
2439
2440         /* step 2: get inode */
2441         key.objectid = backref->inum;
2442         key.type = BTRFS_INODE_ITEM_KEY;
2443         key.offset = 0;
2444
2445         inode = btrfs_iget(fs_info->sb, &key, root, NULL);
2446         if (IS_ERR(inode)) {
2447                 srcu_read_unlock(&fs_info->subvol_srcu, index);
2448                 return 0;
2449         }
2450
2451         srcu_read_unlock(&fs_info->subvol_srcu, index);
2452
2453         /* step 3: relink backref */
2454         lock_start = backref->file_pos;
2455         lock_end = backref->file_pos + backref->num_bytes - 1;
2456         lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2457                          0, &cached);
2458
2459         ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
2460         if (ordered) {
2461                 btrfs_put_ordered_extent(ordered);
2462                 goto out_unlock;
2463         }
2464
2465         trans = btrfs_join_transaction(root);
2466         if (IS_ERR(trans)) {
2467                 ret = PTR_ERR(trans);
2468                 goto out_unlock;
2469         }
2470
2471         key.objectid = backref->inum;
2472         key.type = BTRFS_EXTENT_DATA_KEY;
2473         key.offset = backref->file_pos;
2474
2475         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2476         if (ret < 0) {
2477                 goto out_free_path;
2478         } else if (ret > 0) {
2479                 ret = 0;
2480                 goto out_free_path;
2481         }
2482
2483         extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
2484                                 struct btrfs_file_extent_item);
2485
2486         if (btrfs_file_extent_generation(path->nodes[0], extent) !=
2487             backref->generation)
2488                 goto out_free_path;
2489
2490         btrfs_release_path(path);
2491
2492         start = backref->file_pos;
2493         if (backref->extent_offset < old->extent_offset + old->offset)
2494                 start += old->extent_offset + old->offset -
2495                          backref->extent_offset;
2496
2497         len = min(backref->extent_offset + backref->num_bytes,
2498                   old->extent_offset + old->offset + old->len);
2499         len -= max(backref->extent_offset, old->extent_offset + old->offset);
2500
2501         ret = btrfs_drop_extents(trans, root, inode, start,
2502                                  start + len, 1);
2503         if (ret)
2504                 goto out_free_path;
2505 again:
2506         key.objectid = btrfs_ino(inode);
2507         key.type = BTRFS_EXTENT_DATA_KEY;
2508         key.offset = start;
2509
2510         path->leave_spinning = 1;
2511         if (merge) {
2512                 struct btrfs_file_extent_item *fi;
2513                 u64 extent_len;
2514                 struct btrfs_key found_key;
2515
2516                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2517                 if (ret < 0)
2518                         goto out_free_path;
2519
2520                 path->slots[0]--;
2521                 leaf = path->nodes[0];
2522                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2523
2524                 fi = btrfs_item_ptr(leaf, path->slots[0],
2525                                     struct btrfs_file_extent_item);
2526                 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
2527
2528                 if (extent_len + found_key.offset == start &&
2529                     relink_is_mergable(leaf, fi, new)) {
2530                         btrfs_set_file_extent_num_bytes(leaf, fi,
2531                                                         extent_len + len);
2532                         btrfs_mark_buffer_dirty(leaf);
2533                         inode_add_bytes(inode, len);
2534
2535                         ret = 1;
2536                         goto out_free_path;
2537                 } else {
2538                         merge = false;
2539                         btrfs_release_path(path);
2540                         goto again;
2541                 }
2542         }
2543
2544         ret = btrfs_insert_empty_item(trans, root, path, &key,
2545                                         sizeof(*extent));
2546         if (ret) {
2547                 btrfs_abort_transaction(trans, root, ret);
2548                 goto out_free_path;
2549         }
2550
2551         leaf = path->nodes[0];
2552         item = btrfs_item_ptr(leaf, path->slots[0],
2553                                 struct btrfs_file_extent_item);
2554         btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
2555         btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
2556         btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
2557         btrfs_set_file_extent_num_bytes(leaf, item, len);
2558         btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
2559         btrfs_set_file_extent_generation(leaf, item, trans->transid);
2560         btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
2561         btrfs_set_file_extent_compression(leaf, item, new->compress_type);
2562         btrfs_set_file_extent_encryption(leaf, item, 0);
2563         btrfs_set_file_extent_other_encoding(leaf, item, 0);
2564
2565         btrfs_mark_buffer_dirty(leaf);
2566         inode_add_bytes(inode, len);
2567         btrfs_release_path(path);
2568
2569         ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
2570                         new->disk_len, 0,
2571                         backref->root_id, backref->inum,
2572                         new->file_pos, 0);      /* start - extent_offset */
2573         if (ret) {
2574                 btrfs_abort_transaction(trans, root, ret);
2575                 goto out_free_path;
2576         }
2577
2578         ret = 1;
2579 out_free_path:
2580         btrfs_release_path(path);
2581         path->leave_spinning = 0;
2582         btrfs_end_transaction(trans, root);
2583 out_unlock:
2584         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
2585                              &cached, GFP_NOFS);
2586         iput(inode);
2587         return ret;
2588 }
2589
2590 static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
2591 {
2592         struct old_sa_defrag_extent *old, *tmp;
2593
2594         if (!new)
2595                 return;
2596
2597         list_for_each_entry_safe(old, tmp, &new->head, list) {
2598                 list_del(&old->list);
2599                 kfree(old);
2600         }
2601         kfree(new);
2602 }
2603
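/*
 * Walk the backref rbtree collected for a snapshot-aware defrag extent and
 * relink each old file extent to the new location, then drop our defrag
 * reference and wake anybody waiting on the transaction.
 */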
2604 static void relink_file_extents(struct new_sa_defrag_extent *new)
2605 {
2606         struct btrfs_path *path;
2607         struct sa_defrag_extent_backref *backref;
2608         struct sa_defrag_extent_backref *prev = NULL;
2609         struct inode *inode;
2610         struct btrfs_root *root;
2611         struct rb_node *node;
2612         int ret;
2613
2614         inode = new->inode;
2615         root = BTRFS_I(inode)->root;
2616
2617         path = btrfs_alloc_path();
2618         if (!path)
2619                 return;
2620
2621         if (!record_extent_backrefs(path, new)) {
2622                 btrfs_free_path(path);
2623                 goto out;
2624         }
2625         btrfs_release_path(path);
2626
2627         while (1) {
2628                 node = rb_first(&new->root);
2629                 if (!node)
2630                         break;
2631                 rb_erase(node, &new->root);
2632
2633                 backref = rb_entry(node, struct sa_defrag_extent_backref, node);
2634
2635                 ret = relink_extent_backref(path, prev, backref);
2636                 WARN_ON(ret < 0);
2637
2638                 kfree(prev);
2639
2640                 if (ret == 1)
2641                         prev = backref;
2642                 else
2643                         prev = NULL;
2644                 cond_resched();
2645         }
2646         kfree(prev);
2647
2648         btrfs_free_path(path);
2649 out:
2650         free_sa_defrag_extent(new);
2651
2652         atomic_dec(&root->fs_info->defrag_running);
2653         wake_up(&root->fs_info->transaction_wait);
2654 }
2655
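/*
 * For snapshot-aware defrag: capture the description of a just-finished
 * ordered extent and collect all of the old file extents that covered the
 * same file range, so they can be relinked to the new extent later.
 */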
2656 static struct new_sa_defrag_extent *
2657 record_old_file_extents(struct inode *inode,
2658                         struct btrfs_ordered_extent *ordered)
2659 {
2660         struct btrfs_root *root = BTRFS_I(inode)->root;
2661         struct btrfs_path *path;
2662         struct btrfs_key key;
2663         struct old_sa_defrag_extent *old;
2664         struct new_sa_defrag_extent *new;
2665         int ret;
2666
2667         new = kmalloc(sizeof(*new), GFP_NOFS);
2668         if (!new)
2669                 return NULL;
2670
2671         new->inode = inode;
2672         new->file_pos = ordered->file_offset;
2673         new->len = ordered->len;
2674         new->bytenr = ordered->start;
2675         new->disk_len = ordered->disk_len;
2676         new->compress_type = ordered->compress_type;
2677         new->root = RB_ROOT;
2678         INIT_LIST_HEAD(&new->head);
2679
2680         path = btrfs_alloc_path();
2681         if (!path)
2682                 goto out_kfree;
2683
2684         key.objectid = btrfs_ino(inode);
2685         key.type = BTRFS_EXTENT_DATA_KEY;
2686         key.offset = new->file_pos;
2687
2688         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2689         if (ret < 0)
2690                 goto out_free_path;
2691         if (ret > 0 && path->slots[0] > 0)
2692                 path->slots[0]--;
2693
2694         /* find all the old extents covering the file range */
2695         while (1) {
2696                 struct btrfs_file_extent_item *extent;
2697                 struct extent_buffer *l;
2698                 int slot;
2699                 u64 num_bytes;
2700                 u64 offset;
2701                 u64 end;
2702                 u64 disk_bytenr;
2703                 u64 extent_offset;
2704
2705                 l = path->nodes[0];
2706                 slot = path->slots[0];
2707
2708                 if (slot >= btrfs_header_nritems(l)) {
2709                         ret = btrfs_next_leaf(root, path);
2710                         if (ret < 0)
2711                                 goto out_free_path;
2712                         else if (ret > 0)
2713                                 break;
2714                         continue;
2715                 }
2716
2717                 btrfs_item_key_to_cpu(l, &key, slot);
2718
2719                 if (key.objectid != btrfs_ino(inode))
2720                         break;
2721                 if (key.type != BTRFS_EXTENT_DATA_KEY)
2722                         break;
2723                 if (key.offset >= new->file_pos + new->len)
2724                         break;
2725
2726                 extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
2727
2728                 num_bytes = btrfs_file_extent_num_bytes(l, extent);
2729                 if (key.offset + num_bytes < new->file_pos)
2730                         goto next;
2731
2732                 disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
2733                 if (!disk_bytenr)
2734                         goto next;
2735
2736                 extent_offset = btrfs_file_extent_offset(l, extent);
2737
2738                 old = kmalloc(sizeof(*old), GFP_NOFS);
2739                 if (!old)
2740                         goto out_free_path;
2741
2742                 offset = max(new->file_pos, key.offset);
2743                 end = min(new->file_pos + new->len, key.offset + num_bytes);
2744
2745                 old->bytenr = disk_bytenr;
2746                 old->extent_offset = extent_offset;
2747                 old->offset = offset - key.offset;
2748                 old->len = end - offset;
2749                 old->new = new;
2750                 old->count = 0;
2751                 list_add_tail(&old->list, &new->head);
2752 next:
2753                 path->slots[0]++;
2754                 cond_resched();
2755         }
2756
2757         btrfs_free_path(path);
2758         atomic_inc(&root->fs_info->defrag_running);
2759
2760         return new;
2761
2762 out_free_path:
2763         btrfs_free_path(path);
2764 out_kfree:
2765         free_sa_defrag_extent(new);
2766         return NULL;
2767 }
2768
2769 static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
2770                                          u64 start, u64 len)
2771 {
2772         struct btrfs_block_group_cache *cache;
2773
2774         cache = btrfs_lookup_block_group(root->fs_info, start);
2775         ASSERT(cache);
2776
2777         spin_lock(&cache->lock);
2778         cache->delalloc_bytes -= len;
2779         spin_unlock(&cache->lock);
2780
2781         btrfs_put_block_group(cache);
2782 }
2783
2784 /* as ordered data IO finishes, this gets called so we can finish
2785  * an ordered extent if the range of bytes in the file it covers is
2786  * fully written.
2787  */
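/*
 * In broad strokes: join a transaction, insert the file extent item for the
 * newly written range (or flip a preallocated extent to "written"), add the
 * pending checksums, update the on-disk i_size and inode item, and finally
 * remove the ordered extent from the inode's ordered tree.
 */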
2788 static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
2789 {
2790         struct inode *inode = ordered_extent->inode;
2791         struct btrfs_root *root = BTRFS_I(inode)->root;
2792         struct btrfs_trans_handle *trans = NULL;
2793         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2794         struct extent_state *cached_state = NULL;
2795         struct new_sa_defrag_extent *new = NULL;
2796         int compress_type = 0;
2797         int ret = 0;
2798         u64 logical_len = ordered_extent->len;
2799         bool nolock;
2800         bool truncated = false;
2801
2802         nolock = btrfs_is_free_space_inode(inode);
2803
2804         if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
2805                 ret = -EIO;
2806                 goto out;
2807         }
2808
2809         btrfs_free_io_failure_record(inode, ordered_extent->file_offset,
2810                                      ordered_extent->file_offset +
2811                                      ordered_extent->len - 1);
2812
2813         if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
2814                 truncated = true;
2815                 logical_len = ordered_extent->truncated_len;
2816                 /* Truncated the entire extent, don't bother adding */
2817                 if (!logical_len)
2818                         goto out;
2819         }
2820
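        /*
         * Nocow writes went into pre-existing extents, so there is no new
         * file extent item to insert; just bring the on-disk i_size and the
         * inode item up to date and bail out.
         */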
2821         if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
2822                 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
2823                 btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2824                 if (nolock)
2825                         trans = btrfs_join_transaction_nolock(root);
2826                 else
2827                         trans = btrfs_join_transaction(root);
2828                 if (IS_ERR(trans)) {
2829                         ret = PTR_ERR(trans);
2830                         trans = NULL;
2831                         goto out;
2832                 }
2833                 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2834                 ret = btrfs_update_inode_fallback(trans, root, inode);
2835                 if (ret) /* -ENOMEM or corruption */
2836                         btrfs_abort_transaction(trans, root, ret);
2837                 goto out;
2838         }
2839
2840         lock_extent_bits(io_tree, ordered_extent->file_offset,
2841                          ordered_extent->file_offset + ordered_extent->len - 1,
2842                          0, &cached_state);
2843
2844         ret = test_range_bit(io_tree, ordered_extent->file_offset,
2845                         ordered_extent->file_offset + ordered_extent->len - 1,
2846                         EXTENT_DEFRAG, 1, cached_state);
2847         if (ret) {
2848                 u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
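                /*
                 * The "0 &&" below short-circuits the check, so the
                 * snapshot-aware defrag relink path is currently disabled.
                 */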
2849                 if (0 && last_snapshot >= BTRFS_I(inode)->generation)
2850                         /* the inode is shared */
2851                         new = record_old_file_extents(inode, ordered_extent);
2852
2853                 clear_extent_bit(io_tree, ordered_extent->file_offset,
2854                         ordered_extent->file_offset + ordered_extent->len - 1,
2855                         EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
2856         }
2857
2858         if (nolock)
2859                 trans = btrfs_join_transaction_nolock(root);
2860         else
2861                 trans = btrfs_join_transaction(root);
2862         if (IS_ERR(trans)) {
2863                 ret = PTR_ERR(trans);
2864                 trans = NULL;
2865                 goto out_unlock;
2866         }
2867
2868         trans->block_rsv = &root->fs_info->delalloc_block_rsv;
2869
2870         if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
2871                 compress_type = ordered_extent->compress_type;
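        /*
         * A preallocated extent already has a file extent item on disk, so
         * converting it to a regular (written) extent is enough.  For a COW
         * write we insert a new file extent item for the reserved extent and
         * release its delalloc accounting from the block group.
         */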
2872         if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
2873                 BUG_ON(compress_type);
2874                 ret = btrfs_mark_extent_written(trans, inode,
2875                                                 ordered_extent->file_offset,
2876                                                 ordered_extent->file_offset +
2877                                                 logical_len);
2878         } else {
2879                 BUG_ON(root == root->fs_info->tree_root);
2880                 ret = insert_reserved_file_extent(trans, inode,
2881                                                 ordered_extent->file_offset,
2882                                                 ordered_extent->start,
2883                                                 ordered_extent->disk_len,
2884                                                 logical_len, logical_len,
2885                                                 compress_type, 0, 0,
2886                                                 BTRFS_FILE_EXTENT_REG);
2887                 if (!ret)
2888                         btrfs_release_delalloc_bytes(root,
2889                                                      ordered_extent->start,
2890                                                      ordered_extent->disk_len);
2891         }
2892         unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
2893                            ordered_extent->file_offset, ordered_extent->len,
2894                            trans->transid);
2895         if (ret < 0) {
2896                 btrfs_abort_transaction(trans, root, ret);
2897                 goto out_unlock;
2898         }
2899
2900         add_pending_csums(trans, inode, ordered_extent->file_offset,
2901                           &ordered_extent->list);
2902
2903         btrfs_ordered_update_i_size(inode, 0, ordered_extent);
2904         ret = btrfs_update_inode_fallback(trans, root, inode);
2905         if (ret) { /* -ENOMEM or corruption */
2906                 btrfs_abort_transaction(trans, root, ret);
2907                 goto out_unlock;
2908         }
2909         ret = 0;
2910 out_unlock:
2911         unlock_extent_cached(io_tree, ordered_extent->file_offset,
2912                              ordered_extent->file_offset +
2913                              ordered_extent->len - 1, &cached_state, GFP_NOFS);
2914 out:
2915         if (root != root->fs_info->tree_root)
2916                 btrfs_delalloc_release_metadata(inode, ordered_extent->len);
2917         if (trans)
2918                 btrfs_end_transaction(trans, root);
2919
2920         if (ret || truncated) {
2921                 u64 start, end;
2922
2923                 if (truncated)
2924                         start = ordered_extent->file_offset + logical_len;
2925                 else
2926                         start = ordered_extent->file_offset;
2927                 end = ordered_extent->file_offset + ordered_extent->len - 1;
2928                 clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);
2929
2930                 /* Drop the cache for the part of the extent we didn't write. */
2931                 btrfs_drop_extent_cache(inode, start, end, 0);
2932
2933                 /*
2934                  * If the ordered extent had an IOERR or something else went
2935                  * wrong we need to return the space for this ordered extent
2936                  * back to the allocator.  We only free the extent in the
2937                  * truncated case if we didn't write out the extent at all.
2938                  */
2939                 if ((ret || !logical_len) &&
2940                     !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
2941                     !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
2942                         btrfs_free_reserved_extent(root, ordered_extent->start,
2943                                                    ordered_extent->disk_len, 1);
2944         }
2945
2946
2947         /*
2948          * This needs to be done to make sure anybody waiting knows we are done
2949          * updating everything for this ordered extent.
2950          */
2951         btrfs_remove_ordered_extent(inode, ordered_extent);
2952
2953         /* for snapshot-aware defrag */
2954         if (new) {
2955                 if (ret) {
2956                         free_sa_defrag_extent(new);
2957                         atomic_dec(&root->fs_info->defrag_running);
2958                 } else {
2959                         relink_file_extents(new);
2960                 }
2961         }
2962
2963         /* once for us */
2964         btrfs_put_ordered_extent(ordered_extent);
2965         /* once for the tree */
2966         btrfs_put_ordered_extent(ordered_extent);
2967
2968         return ret;
2969 }
2970
2971 static void finish_ordered_fn(struct btrfs_work *work)
2972 {
2973         struct btrfs_ordered_extent *ordered_extent;
2974         ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
2975         btrfs_finish_ordered_io(ordered_extent);
2976 }
2977
2978 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
2979                                 struct extent_state *state, int uptodate)
2980 {
2981         struct inode *inode = page->mapping->host;
2982         struct btrfs_root *root = BTRFS_I(inode)->root;
2983         struct btrfs_ordered_extent *ordered_extent = NULL;
2984         struct btrfs_workqueue *wq;
2985         btrfs_work_func_t func;
2986
2987         trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
2988
2989         ClearPagePrivate2(page);
2990         if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
2991                                             end - start + 1, uptodate))
2992                 return 0;
2993
2994         if (btrfs_is_free_space_inode(inode)) {
2995                 wq = root->fs_info->endio_freespace_worker;
2996                 func = btrfs_freespace_write_helper;
2997         } else {
2998                 wq = root->fs_info->endio_write_workers;
2999                 func = btrfs_endio_write_helper;
3000         }
3001
3002         btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
3003                         NULL);
3004         btrfs_queue_work(wq, &ordered_extent->work);
3005
3006         return 0;
3007 }
3008
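/*
 * Verify the checksum of one block that was just read: map the page, csum
 * the [pgoff, pgoff + len) range and compare against the expected value
 * stashed in the io_bio.  On mismatch the range is poisoned and -EIO is
 * returned, unless the expected checksum is zero.
 */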
3009 static int __readpage_endio_check(struct inode *inode,
3010                                   struct btrfs_io_bio *io_bio,
3011                                   int icsum, struct page *page,
3012                                   int pgoff, u64 start, size_t len)
3013 {
3014         char *kaddr;
3015         u32 csum_expected;
3016         u32 csum = ~(u32)0;
3017         static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
3018                                       DEFAULT_RATELIMIT_BURST);
3019
3020         csum_expected = *(((u32 *)io_bio->csum) + icsum);
3021
3022         kaddr = kmap_atomic(page);
3023         csum = btrfs_csum_data(kaddr + pgoff, csum,  len);
3024         btrfs_csum_final(csum, (char *)&csum);
3025         if (csum != csum_expected)
3026                 goto zeroit;
3027
3028         kunmap_atomic(kaddr);
3029         return 0;
3030 zeroit:
3031         if (__ratelimit(&_rs))
3032                 btrfs_warn(BTRFS_I(inode)->root->fs_info,
3033                            "csum failed ino %llu off %llu csum %u expected csum %u",
3034                            btrfs_ino(inode), start, csum, csum_expected);
3035         memset(kaddr + pgoff, 1, len);
3036         flush_dcache_page(page);
3037         kunmap_atomic(kaddr);
3038         if (csum_expected == 0)
3039                 return 0;
3040         return -EIO;
3041 }
3042
3043 /*
3044  * when reads are done, we need to check csums to verify the data is correct
3045  * if there's a match, we allow the bio to finish.  If not, the code in
3046  * extent_io.c will try to find good copies for us.
3047  */
3048 static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
3049                                       u64 phy_offset, struct page *page,
3050                                       u64 start, u64 end, int mirror)
3051 {
3052         size_t offset = start - page_offset(page);
3053         struct inode *inode = page->mapping->host;
3054         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3055         struct btrfs_root *root = BTRFS_I(inode)->root;
3056
3057         if (PageChecked(page)) {
3058                 ClearPageChecked(page);
3059                 return 0;
3060         }
3061
3062         if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
3063                 return 0;
3064
3065         if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
3066             test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
3067                 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
3068                                   GFP_NOFS);
3069                 return 0;
3070         }
3071
3072         phy_offset >>= inode->i_sb->s_blocksize_bits;
3073         return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
3074                                       start, (size_t)(end - start + 1));
3075 }
3076
3077 struct delayed_iput {
3078         struct list_head list;
3079         struct inode *inode;
3080 };
3081
3082 /* JDM: If this is fs-wide, why can't we add a pointer to
3083  * btrfs_inode instead and avoid the allocation? */
3084 void btrfs_add_delayed_iput(struct inode *inode)
3085 {
3086         struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
3087         struct delayed_iput *delayed;
3088
3089         if (atomic_add_unless(&inode->i_count, -1, 1))
3090                 return;
3091
3092         delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
3093         delayed->inode = inode;
3094
3095         spin_lock(&fs_info->delayed_iput_lock);
3096         list_add_tail(&delayed->list, &fs_info->delayed_iputs);
3097         spin_unlock(&fs_info->delayed_iput_lock);
3098 }
3099
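/*
 * Process all pending delayed iputs: splice the list off under the spinlock
 * and drop the inode references that btrfs_add_delayed_iput() deferred.
 */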
3100 void btrfs_run_delayed_iputs(struct btrfs_root *root)
3101 {
3102         LIST_HEAD(list);
3103         struct btrfs_fs_info *fs_info = root->fs_info;
3104         struct delayed_iput *delayed;
3105         int empty;
3106
3107         spin_lock(&fs_info->delayed_iput_lock);
3108         empty = list_empty(&fs_info->delayed_iputs);
3109         spin_unlock(&fs_info->delayed_iput_lock);
3110         if (empty)
3111                 return;
3112
3113         down_read(&fs_info->delayed_iput_sem);
3114
3115         spin_lock(&fs_info->delayed_iput_lock);
3116         list_splice_init(&fs_info->delayed_iputs, &list);
3117         spin_unlock(&fs_info->delayed_iput_lock);
3118
3119         while (!list_empty(&list)) {
3120                 delayed = list_entry(list.next, struct delayed_iput, list);
3121                 list_del(&delayed->list);
3122                 iput(delayed->inode);
3123                 kfree(delayed);
3124         }
3125
3126         up_read(&root->fs_info->delayed_iput_sem);
3127 }
3128
3129 /*
3130  * This is called at transaction commit time. If there are no orphan
3131  * files in the subvolume, it removes the orphan item and frees the block_rsv
3132  * structure.
3133  */
3134 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
3135                               struct btrfs_root *root)
3136 {
3137         struct btrfs_block_rsv *block_rsv;
3138         int ret;
3139
3140         if (atomic_read(&root->orphan_inodes) ||
3141             root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
3142                 return;
3143
3144         spin_lock(&root->orphan_lock);
3145         if (atomic_read(&root->orphan_inodes)) {
3146                 spin_unlock(&root->orphan_lock);
3147                 return;
3148         }
3149
3150         if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
3151                 spin_unlock(&root->orphan_lock);
3152                 return;
3153         }
3154
3155         block_rsv = root->orphan_block_rsv;
3156         root->orphan_block_rsv = NULL;
3157         spin_unlock(&root->orphan_lock);
3158
3159         if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) &&
3160             btrfs_root_refs(&root->root_item) > 0) {
3161                 ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
3162                                             root->root_key.objectid);
3163                 if (ret)
3164                         btrfs_abort_transaction(trans, root, ret);
3165                 else
3166                         clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
3167                                   &root->state);
3168         }
3169
3170         if (block_rsv) {
3171                 WARN_ON(block_rsv->size > 0);
3172                 btrfs_free_block_rsv(root, block_rsv);
3173         }
3174 }
3175
3176 /*
3177  * This creates an orphan entry for the given inode in case something goes
3178  * wrong in the middle of an unlink/truncate.
3179  *
3180  * NOTE: the caller of this function should have reserved 5 units of
3181  *       metadata before calling it.
3182  */
3183 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
3184 {
3185         struct btrfs_root *root = BTRFS_I(inode)->root;
3186         struct btrfs_block_rsv *block_rsv = NULL;
3187         int reserve = 0;
3188         int insert = 0;
3189         int ret;
3190
3191         if (!root->orphan_block_rsv) {
3192                 block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
3193                 if (!block_rsv)
3194                         return -ENOMEM;
3195         }
3196
3197         spin_lock(&root->orphan_lock);
3198         if (!root->orphan_block_rsv) {
3199                 root->orphan_block_rsv = block_rsv;
3200         } else if (block_rsv) {
3201                 btrfs_free_block_rsv(root, block_rsv);
3202                 block_rsv = NULL;
3203         }
3204
3205         if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3206                               &BTRFS_I(inode)->runtime_flags)) {
3207 #if 0
3208                 /*
3209                  * For proper ENOSPC handling, we should do orphan
3210                  * cleanup when mounting. But this introduces backward
3211                  * compatibility issue.
3212                  */
3213                 if (!xchg(&root->orphan_item_inserted, 1))
3214                         insert = 2;
3215                 else
3216                         insert = 1;
3217 #endif
3218                 insert = 1;
3219                 atomic_inc(&root->orphan_inodes);
3220         }
3221
3222         if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3223                               &BTRFS_I(inode)->runtime_flags))
3224                 reserve = 1;
3225         spin_unlock(&root->orphan_lock);
3226
3227         /* grab metadata reservation from transaction handle */
3228         if (reserve) {
3229                 ret = btrfs_orphan_reserve_metadata(trans, inode);
3230                 BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
3231         }
3232
3233         /* insert an orphan item to track this unlinked/truncated file */
3234         if (insert >= 1) {
3235                 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
3236                 if (ret) {
3237                         atomic_dec(&root->orphan_inodes);
3238                         if (reserve) {
3239                                 clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3240                                           &BTRFS_I(inode)->runtime_flags);
3241                                 btrfs_orphan_release_metadata(inode);
3242                         }
3243                         if (ret != -EEXIST) {
3244                                 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3245                                           &BTRFS_I(inode)->runtime_flags);
3246                                 btrfs_abort_transaction(trans, root, ret);
3247                                 return ret;
3248                         }
3249                 }
3250                 ret = 0;
3251         }
3252
3253         /* insert an orphan item to record that the subvolume has orphan files */
3254         if (insert >= 2) {
3255                 ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
3256                                                root->root_key.objectid);
3257                 if (ret && ret != -EEXIST) {
3258                         btrfs_abort_transaction(trans, root, ret);
3259                         return ret;
3260                 }
3261         }
3262         return 0;
3263 }
3264
3265 /*
3266  * We have done the truncate/delete so we can go ahead and remove the orphan
3267  * item for this particular inode.
3268  */
3269 static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
3270                             struct inode *inode)
3271 {
3272         struct btrfs_root *root = BTRFS_I(inode)->root;
3273         int delete_item = 0;
3274         int release_rsv = 0;
3275         int ret = 0;
3276
3277         spin_lock(&root->orphan_lock);
3278         if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3279                                &BTRFS_I(inode)->runtime_flags))
3280                 delete_item = 1;
3281
3282         if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
3283                                &BTRFS_I(inode)->runtime_flags))
3284                 release_rsv = 1;
3285         spin_unlock(&root->orphan_lock);
3286
3287         if (delete_item) {
3288                 atomic_dec(&root->orphan_inodes);
3289                 if (trans)
3290                         ret = btrfs_del_orphan_item(trans, root,
3291                                                     btrfs_ino(inode));
3292         }
3293
3294         if (release_rsv)
3295                 btrfs_orphan_release_metadata(inode);
3296
3297         return ret;
3298 }
3299
3300 /*
3301  * this cleans up any orphans that may be left on the list from the last use
3302  * of this root.
3303  */
3304 int btrfs_orphan_cleanup(struct btrfs_root *root)
3305 {
3306         struct btrfs_path *path;
3307         struct extent_buffer *leaf;
3308         struct btrfs_key key, found_key;
3309         struct btrfs_trans_handle *trans;
3310         struct inode *inode;
3311         u64 last_objectid = 0;
3312         int ret = 0, nr_unlink = 0, nr_truncate = 0;
3313
3314         if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
3315                 return 0;
3316
3317         path = btrfs_alloc_path();
3318         if (!path) {
3319                 ret = -ENOMEM;
3320                 goto out;
3321         }
3322         path->reada = -1;
3323
3324         key.objectid = BTRFS_ORPHAN_OBJECTID;
3325         key.type = BTRFS_ORPHAN_ITEM_KEY;
3326         key.offset = (u64)-1;
3327
3328         while (1) {
3329                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3330                 if (ret < 0)
3331                         goto out;
3332
3333                 /*
3334                  * ret == 0 means we found exactly what we were searching for,
3335                  * which is weird but possible; only move the path back if we
3336                  * didn't find the key, and then check whether the item matches
3337                  */
3338                 if (ret > 0) {
3339                         ret = 0;
3340                         if (path->slots[0] == 0)
3341                                 break;
3342                         path->slots[0]--;
3343                 }
3344
3345                 /* pull out the item */
3346                 leaf = path->nodes[0];
3347                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3348
3349                 /* make sure the item matches what we want */
3350                 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
3351                         break;
3352                 if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
3353                         break;
3354
3355                 /* release the path since we're done with it */
3356                 btrfs_release_path(path);
3357
3358                 /*
3359                  * this is basically btrfs_lookup, without the root-crossing
3360                  * part.  we store the inode number in the
3361                  * offset of the orphan item.
3362                  */
3363
3364                 if (found_key.offset == last_objectid) {
3365                         btrfs_err(root->fs_info,
3366                                 "Error removing orphan entry, stopping orphan cleanup");
3367                         ret = -EINVAL;
3368                         goto out;
3369                 }
3370
3371                 last_objectid = found_key.offset;
3372
3373                 found_key.objectid = found_key.offset;
3374                 found_key.type = BTRFS_INODE_ITEM_KEY;
3375                 found_key.offset = 0;
3376                 inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
3377                 ret = PTR_ERR_OR_ZERO(inode);
3378                 if (ret && ret != -ESTALE)
3379                         goto out;
3380
3381                 if (ret == -ESTALE && root == root->fs_info->tree_root) {
3382                         struct btrfs_root *dead_root;
3383                         struct btrfs_fs_info *fs_info = root->fs_info;
3384                         int is_dead_root = 0;
3385
3386                         /*
3387                          * this is an orphan in the tree root. Currently these
3388                          * could come from 2 sources:
3389                          *  a) a snapshot deletion in progress
3390                          *  b) a free space cache inode
3391                          * We need to distinguish those two, as the snapshot
3392                          * orphan must not get deleted.
3393                          * find_dead_roots already ran before us, so if this
3394                          * is a snapshot deletion, we should find the root
3395                          * in the dead_roots list
3396                          */
3397                         spin_lock(&fs_info->trans_lock);
3398                         list_for_each_entry(dead_root, &fs_info->dead_roots,
3399                                             root_list) {
3400                                 if (dead_root->root_key.objectid ==
3401                                     found_key.objectid) {
3402                                         is_dead_root = 1;
3403                                         break;
3404                                 }
3405                         }
3406                         spin_unlock(&fs_info->trans_lock);
3407                         if (is_dead_root) {
3408                                 /* prevent this orphan from being found again */
3409                                 key.offset = found_key.objectid - 1;
3410                                 continue;
3411                         }
3412                 }
3413                 /*
3414                  * Inode is already gone but the orphan item is still there,
3415                  * kill the orphan item.
3416                  */
3417                 if (ret == -ESTALE) {
3418                         trans = btrfs_start_transaction(root, 1);
3419                         if (IS_ERR(trans)) {
3420                                 ret = PTR_ERR(trans);
3421                                 goto out;
3422                         }
3423                         btrfs_debug(root->fs_info, "auto deleting %Lu",
3424                                 found_key.objectid);
3425                         ret = btrfs_del_orphan_item(trans, root,
3426                                                     found_key.objectid);
3427                         btrfs_end_transaction(trans, root);
3428                         if (ret)
3429                                 goto out;
3430                         continue;
3431                 }
3432
3433                 /*
3434                  * add this inode to the orphan list so btrfs_orphan_del does
3435                  * the proper thing when we hit it
3436                  */
3437                 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3438                         &BTRFS_I(inode)->runtime_flags);
3439                 atomic_inc(&root->orphan_inodes);
3440
3441                 /* if we have links, this was a truncate, let's do that */
3442                 if (inode->i_nlink) {
3443                         if (WARN_ON(!S_ISREG(inode->i_mode))) {
3444                                 iput(inode);
3445                                 continue;
3446                         }
3447                         nr_truncate++;
3448
3449                         /* 1 for the orphan item deletion. */
3450                         trans = btrfs_start_transaction(root, 1);
3451                         if (IS_ERR(trans)) {
3452                                 iput(inode);
3453                                 ret = PTR_ERR(trans);
3454                                 goto out;
3455                         }
3456                         ret = btrfs_orphan_add(trans, inode);
3457                         btrfs_end_transaction(trans, root);
3458                         if (ret) {
3459                                 iput(inode);
3460                                 goto out;
3461                         }
3462
3463                         ret = btrfs_truncate(inode);
3464                         if (ret)
3465                                 btrfs_orphan_del(NULL, inode);
3466                 } else {
3467                         nr_unlink++;
3468                 }
3469
3470                 /* this will do delete_inode and everything for us */
3471                 iput(inode);
3472                 if (ret)
3473                         goto out;
3474         }
3475         /* release the path since we're done with it */
3476         btrfs_release_path(path);
3477
3478         root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
3479
3480         if (root->orphan_block_rsv)
3481                 btrfs_block_rsv_release(root, root->orphan_block_rsv,
3482                                         (u64)-1);
3483
3484         if (root->orphan_block_rsv ||
3485             test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
3486                 trans = btrfs_join_transaction(root);
3487                 if (!IS_ERR(trans))
3488                         btrfs_end_transaction(trans, root);
3489         }
3490
3491         if (nr_unlink)
3492                 btrfs_debug(root->fs_info, "unlinked %d orphans", nr_unlink);
3493         if (nr_truncate)
3494                 btrfs_debug(root->fs_info, "truncated %d orphans", nr_truncate);
3495
3496 out:
3497         if (ret)
3498                 btrfs_err(root->fs_info,
3499                         "could not do orphan cleanup %d", ret);
3500         btrfs_free_path(path);
3501         return ret;
3502 }
3503
3504 /*
3505  * very simple check to peek ahead in the leaf looking for xattrs.  If we
3506  * don't find any xattrs, we know there can't be any acls.
3507  *
3508  * slot is the slot the inode is in, objectid is the objectid of the inode
3509  */
3510 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
3511                                           int slot, u64 objectid,
3512                                           int *first_xattr_slot)
3513 {
3514         u32 nritems = btrfs_header_nritems(leaf);
3515         struct btrfs_key found_key;
3516         static u64 xattr_access = 0;
3517         static u64 xattr_default = 0;
3518         int scanned = 0;
3519
3520         if (!xattr_access) {
3521                 xattr_access = btrfs_name_hash(POSIX_ACL_XATTR_ACCESS,
3522                                         strlen(POSIX_ACL_XATTR_ACCESS));
3523                 xattr_default = btrfs_name_hash(POSIX_ACL_XATTR_DEFAULT,
3524                                         strlen(POSIX_ACL_XATTR_DEFAULT));
3525         }
3526
3527         slot++;
3528         *first_xattr_slot = -1;
3529         while (slot < nritems) {
3530                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3531
3532                 /* we found a different objectid, there must not be acls */
3533                 if (found_key.objectid != objectid)
3534                         return 0;
3535
3536                 /* we found an xattr, assume we've got an acl */
3537                 if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
3538                         if (*first_xattr_slot == -1)
3539                                 *first_xattr_slot = slot;
3540                         if (found_key.offset == xattr_access ||
3541                             found_key.offset == xattr_default)
3542                                 return 1;
3543                 }
3544
3545                 /*
3546                  * we found a key greater than an xattr key, there can't
3547                  * be any acls later on
3548                  */
3549                 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
3550                         return 0;
3551
3552                 slot++;
3553                 scanned++;
3554
3555                 /*
3556                  * it goes inode, inode backrefs, xattrs, extents,
3557                  * so if there are a ton of hard links to an inode there can
3558                  * be a lot of backrefs.  Don't waste time searching too hard,
3559                  * this is just an optimization
3560                  */
3561                 if (scanned >= 8)
3562                         break;
3563         }
3564         /* we hit the end of the leaf before we found an xattr or
3565          * something larger than an xattr.  We have to assume the inode
3566          * has acls
3567          */
3568         if (*first_xattr_slot == -1)
3569                 *first_xattr_slot = slot;
3570         return 1;
3571 }
3572
3573 /*
3574  * read an inode from the btree into the in-memory inode
3575  */
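/*
 * The lookup key comes from BTRFS_I(inode)->location.  After copying the
 * inode item fields we peek at the following items in the same leaf to cache
 * the directory index from the inode ref, to load inode properties from the
 * first xattr slot, and to pre-cache a NULL ACL when no xattrs exist.
 */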
3576 static void btrfs_read_locked_inode(struct inode *inode)
3577 {
3578         struct btrfs_path *path;
3579         struct extent_buffer *leaf;
3580         struct btrfs_inode_item *inode_item;
3581         struct btrfs_root *root = BTRFS_I(inode)->root;
3582         struct btrfs_key location;
3583         unsigned long ptr;
3584         int maybe_acls;
3585         u32 rdev;
3586         int ret;
3587         bool filled = false;
3588         int first_xattr_slot;
3589
3590         ret = btrfs_fill_inode(inode, &rdev);
3591         if (!ret)
3592                 filled = true;
3593
3594         path = btrfs_alloc_path();
3595         if (!path)
3596                 goto make_bad;
3597
3598         memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
3599
3600         ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
3601         if (ret)
3602                 goto make_bad;
3603
3604         leaf = path->nodes[0];
3605
3606         if (filled)
3607                 goto cache_index;
3608
3609         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3610                                     struct btrfs_inode_item);
3611         inode->i_mode = btrfs_inode_mode(leaf, inode_item);
3612         set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
3613         i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
3614         i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
3615         btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
3616
3617         inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
3618         inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
3619
3620         inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
3621         inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
3622
3623         inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
3624         inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
3625
3626         BTRFS_I(inode)->i_otime.tv_sec =
3627                 btrfs_timespec_sec(leaf, &inode_item->otime);
3628         BTRFS_I(inode)->i_otime.tv_nsec =
3629                 btrfs_timespec_nsec(leaf, &inode_item->otime);
3630
3631         inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
3632         BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
3633         BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
3634
3635         inode->i_version = btrfs_inode_sequence(leaf, inode_item);
3636         inode->i_generation = BTRFS_I(inode)->generation;
3637         inode->i_rdev = 0;
3638         rdev = btrfs_inode_rdev(leaf, inode_item);
3639
3640         BTRFS_I(inode)->index_cnt = (u64)-1;
3641         BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
3642
3643 cache_index:
3644         /*
3645          * If we were modified in the current generation and evicted from memory
3646          * and then re-read we need to do a full sync since we don't have any
3647          * idea about which extents were modified before we were evicted from
3648          * cache.
3649          *
3650          * This is required for both inode re-read from disk and delayed inode
3651          * in delayed_nodes_tree.
3652          */
3653         if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
3654                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
3655                         &BTRFS_I(inode)->runtime_flags);
3656
3657         /*
3658          * We don't persist the id of the transaction where an unlink operation
3659          * against the inode was last made. So here we assume the inode might
3660          * have been evicted, and therefore the exact value of last_unlink_trans
3661          * lost, and set it to last_trans to avoid metadata inconsistencies
3662          * between the inode and its parent if the inode is fsync'ed and the log
3663          * replayed. For example, in the scenario:
3664          *
3665          * touch mydir/foo
3666          * ln mydir/foo mydir/bar
3667          * sync
3668          * unlink mydir/bar
3669          * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
3670          * xfs_io -c fsync mydir/foo
3671          * <power failure>
3672          * mount fs, triggers fsync log replay
3673          *
3674          * We must make sure that when we fsync our inode foo we also log its
3675          * parent inode, otherwise after log replay the parent still has the
3676          * dentry with the "bar" name but our inode foo has a link count of 1
3677          * and doesn't have an inode ref with the name "bar" anymore.
3678          *
3679          * Setting last_unlink_trans to last_trans is a pessimistic approach,
3680          * but it guarantees correctness at the expense of occasional full
3681          * transaction commits on fsync if our inode is a directory, or if our
3682          * inode is not a directory, logging its parent unnecessarily.
3683          */
3684         BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
3685
3686         path->slots[0]++;
3687         if (inode->i_nlink != 1 ||
3688             path->slots[0] >= btrfs_header_nritems(leaf))
3689                 goto cache_acl;
3690
3691         btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
3692         if (location.objectid != btrfs_ino(inode))
3693                 goto cache_acl;
3694
3695         ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3696         if (location.type == BTRFS_INODE_REF_KEY) {
3697                 struct btrfs_inode_ref *ref;
3698
3699                 ref = (struct btrfs_inode_ref *)ptr;
3700                 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
3701         } else if (location.type == BTRFS_INODE_EXTREF_KEY) {
3702                 struct btrfs_inode_extref *extref;
3703
3704                 extref = (struct btrfs_inode_extref *)ptr;
3705                 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
3706                                                                      extref);
3707         }
3708 cache_acl:
3709         /*
3710          * try to precache a NULL acl entry for files that don't have
3711          * any xattrs or acls
3712          */
3713         maybe_acls = acls_after_inode_item(leaf, path->slots[0],
3714                                            btrfs_ino(inode), &first_xattr_slot);
3715         if (first_xattr_slot != -1) {
3716                 path->slots[0] = first_xattr_slot;
3717                 ret = btrfs_load_inode_props(inode, path);
3718                 if (ret)
3719                         btrfs_err(root->fs_info,
3720                                   "error loading props for ino %llu (root %llu): %d",
3721                                   btrfs_ino(inode),
3722                                   root->root_key.objectid, ret);
3723         }
3724         btrfs_free_path(path);
3725
3726         if (!maybe_acls)
3727                 cache_no_acl(inode);
3728
3729         switch (inode->i_mode & S_IFMT) {
3730         case S_IFREG:
3731                 inode->i_mapping->a_ops = &btrfs_aops;
3732                 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3733                 inode->i_fop = &btrfs_file_operations;
3734                 inode->i_op = &btrfs_file_inode_operations;
3735                 break;
3736         case S_IFDIR:
3737                 inode->i_fop = &btrfs_dir_file_operations;
3738                 if (root == root->fs_info->tree_root)
3739                         inode->i_op = &btrfs_dir_ro_inode_operations;
3740                 else
3741                         inode->i_op = &btrfs_dir_inode_operations;
3742                 break;
3743         case S_IFLNK:
3744                 inode->i_op = &btrfs_symlink_inode_operations;
3745                 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3746                 break;
3747         default:
3748                 inode->i_op = &btrfs_special_inode_operations;
3749                 init_special_inode(inode, inode->i_mode, rdev);
3750                 break;
3751         }
3752
3753         btrfs_update_iflags(inode);
3754         return;
3755
3756 make_bad:
3757         btrfs_free_path(path);
3758         make_bad_inode(inode);
3759 }
3760
3761 /*
3762  * given a leaf and an inode, copy the inode fields into the leaf
3763  */
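/*
 * All fields are written through a map token, which caches the currently
 * mapped extent buffer page so consecutive setters on the same leaf avoid
 * re-mapping it for every field.
 */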
3764 static void fill_inode_item(struct btrfs_trans_handle *trans,
3765                             struct extent_buffer *leaf,
3766                             struct btrfs_inode_item *item,
3767                             struct inode *inode)
3768 {
3769         struct btrfs_map_token token;
3770
3771         btrfs_init_map_token(&token);
3772
3773         btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3774         btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3775         btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
3776                                    &token);
3777         btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3778         btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3779
3780         btrfs_set_token_timespec_sec(leaf, &item->atime,
3781                                      inode->i_atime.tv_sec, &token);
3782         btrfs_set_token_timespec_nsec(leaf, &item->atime,
3783                                       inode->i_atime.tv_nsec, &token);
3784
3785         btrfs_set_token_timespec_sec(leaf, &item->mtime,
3786                                      inode->i_mtime.tv_sec, &token);
3787         btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3788                                       inode->i_mtime.tv_nsec, &token);
3789
3790         btrfs_set_token_timespec_sec(leaf, &item->ctime,
3791                                      inode->i_ctime.tv_sec, &token);
3792         btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3793                                       inode->i_ctime.tv_nsec, &token);
3794
3795         btrfs_set_token_timespec_sec(leaf, &item->otime,
3796                                      BTRFS_I(inode)->i_otime.tv_sec, &token);
3797         btrfs_set_token_timespec_nsec(leaf, &item->otime,
3798                                       BTRFS_I(inode)->i_otime.tv_nsec, &token);
3799
3800         btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3801                                      &token);
3802         btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
3803                                          &token);
3804         btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3805         btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3806         btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3807         btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3808         btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3809 }
3810
3811 /*
3812  * copy everything in the in-memory inode into the btree.
3813  */
3814 static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
3815                                 struct btrfs_root *root, struct inode *inode)
3816 {
3817         struct btrfs_inode_item *inode_item;
3818         struct btrfs_path *path;
3819         struct extent_buffer *leaf;
3820         int ret;
3821
3822         path = btrfs_alloc_path();
3823         if (!path)
3824                 return -ENOMEM;
3825
3826         path->leave_spinning = 1;
3827         ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
3828                                  1);
3829         if (ret) {
3830                 if (ret > 0)
3831                         ret = -ENOENT;
3832                 goto failed;
3833         }
3834
3835         leaf = path->nodes[0];
3836         inode_item = btrfs_item_ptr(leaf, path->slots[0],
3837                                     struct btrfs_inode_item);
3838
3839         fill_inode_item(trans, leaf, inode_item, inode);
3840         btrfs_mark_buffer_dirty(leaf);
3841         btrfs_set_inode_last_trans(trans, inode);
3842         ret = 0;
3843 failed:
3844         btrfs_free_path(path);
3845         return ret;
3846 }
3847
3848 /*
3849  * copy everything in the in-memory inode into the btree.
3850  */
3851 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
3852                                 struct btrfs_root *root, struct inode *inode)
3853 {
3854         int ret;
3855
3856         /*
3857          * If the inode is a free space inode, we can deadlock during commit
3858          * if we put it into the delayed code.
3859          *
3860          * The data relocation inode should also be directly updated
3861          * without delay
3862          */
3863         if (!btrfs_is_free_space_inode(inode)
3864             && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
3865             && !root->fs_info->log_root_recovering) {
3866                 btrfs_update_root_times(trans, root);
3867
3868                 ret = btrfs_delayed_update_inode(trans, root, inode);
3869                 if (!ret)
3870                         btrfs_set_inode_last_trans(trans, inode);
3871                 return ret;
3872         }
3873
3874         return btrfs_update_inode_item(trans, root, inode);
3875 }
3876
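/*
 * Like btrfs_update_inode(), but if the delayed path fails with -ENOSPC we
 * fall back to updating the inode item in the tree directly.
 */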
3877 noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
3878                                          struct btrfs_root *root,
3879                                          struct inode *inode)
3880 {
3881         int ret;
3882
3883         ret = btrfs_update_inode(trans, root, inode);
3884         if (ret == -ENOSPC)
3885                 return btrfs_update_inode_item(trans, root, inode);
3886         return ret;
3887 }
3888
3889 /*
3890  * unlink helper that gets used here in inode.c and in the tree logging
3891  * recovery code.  It removes a link in a directory with a given name, and
3892  * also drops the back refs in the inode to the directory
3893  */
3894 static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3895                                 struct btrfs_root *root,
3896                                 struct inode *dir, struct inode *inode,
3897                                 const char *name, int name_len)
3898 {
3899         struct btrfs_path *path;
3900         int ret = 0;
3901         struct extent_buffer *leaf;
3902         struct btrfs_dir_item *di;
3903         struct btrfs_key key;
3904         u64 index;
3905         u64 ino = btrfs_ino(inode);
3906         u64 dir_ino = btrfs_ino(dir);
3907
3908         path = btrfs_alloc_path();
3909         if (!path) {
3910                 ret = -ENOMEM;
3911                 goto out;
3912         }
3913
3914         path->leave_spinning = 1;
3915         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
3916                                     name, name_len, -1);
3917         if (IS_ERR(di)) {
3918                 ret = PTR_ERR(di);
3919                 goto err;
3920         }
3921         if (!di) {
3922                 ret = -ENOENT;
3923                 goto err;
3924         }
3925         leaf = path->nodes[0];
3926         btrfs_dir_item_key_to_cpu(leaf, di, &key);
3927         ret = btrfs_delete_one_dir_name(trans, root, path, di);
3928         if (ret)
3929                 goto err;
3930         btrfs_release_path(path);
3931
3932         /*
3933          * If we don't have the dir index cached, we have to get it by
3934          * looking up the inode ref; and since we then hold the inode ref
3935          * anyway, we remove it directly instead of deferring the deletion.
3936          *
3937          * But if we do have the dir index, there is no need to search the
3938          * inode ref for it.  Since the inode ref sits close to the inode
3939          * item, it is better to delay its deletion and do it together with
3940          * the delayed inode item update.
3941          */
3942         if (BTRFS_I(inode)->dir_index) {
3943                 ret = btrfs_delayed_delete_inode_ref(inode);
3944                 if (!ret) {
3945                         index = BTRFS_I(inode)->dir_index;
3946                         goto skip_backref;
3947                 }
3948         }
3949
3950         ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
3951                                   dir_ino, &index);
3952         if (ret) {
3953                 btrfs_info(root->fs_info,
3954                         "failed to delete reference to %.*s, inode %llu parent %llu",
3955                         name_len, name, ino, dir_ino);
3956                 btrfs_abort_transaction(trans, root, ret);
3957                 goto err;
3958         }
3959 skip_backref:
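             /* Remove the dir index item for this name via the delayed items code. */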
3960         ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
3961         if (ret) {
3962                 btrfs_abort_transaction(trans, root, ret);
3963                 goto err;
3964         }
3965
3966         ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
3967                                          inode, dir_ino);
3968         if (ret != 0 && ret != -ENOENT) {
3969                 btrfs_abort_transaction(trans, root, ret);
3970                 goto err;
3971         }
3972
3973         ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
3974                                            dir, index);
3975         if (ret == -ENOENT)
3976                 ret = 0;
3977         else if (ret)
3978                 btrfs_abort_transaction(trans, root, ret);
3979 err:
3980         btrfs_free_path(path);
3981         if (ret)
3982                 goto out;
3983
3984         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3985         inode_inc_iversion(inode);
3986         inode_inc_iversion(dir);
3987         inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3988         ret = btrfs_update_inode(trans, root, dir);
3989 out:
3990         return ret;
3991 }
3992
3993 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
3994                        struct btrfs_root *root,
3995                        struct inode *dir, struct inode *inode,
3996                        const char *name, int name_len)
3997 {
3998         int ret;
3999         ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
4000         if (!ret) {
4001                 drop_nlink(inode);
4002                 ret = btrfs_update_inode(trans, root, inode);
4003         }
4004         return ret;
4005 }
4006
4007 /*
4008  * helper to start transaction for unlink and rmdir.
4009  *
4010  * unlink and rmdir are special in btrfs: they do not always free space, so
4011  * if we cannot make our reservation the normal way, see whether there is
4012  * enough slack room in the global reserve to migrate from; otherwise we
4013  * cannot allow the unlink to occur.
4014  */
4015 static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
4016 {
4017         struct btrfs_trans_handle *trans;
4018         struct btrfs_root *root = BTRFS_I(dir)->root;
4019         int ret;
4020
4021         /*
4022          * 1 for the possible orphan item
4023          * 1 for the dir item
4024          * 1 for the dir index
4025          * 1 for the inode ref
4026          * 1 for the inode
4027          */
4028         trans = btrfs_start_transaction(root, 5);
4029         if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
4030                 return trans;
4031
4032         if (PTR_ERR(trans) == -ENOSPC) {
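                     /*
                      * Reservation failed: start an unreserved transaction and
                      * try to pull the bytes we need from the global reserve
                      * into the transaction reserve instead.
                      */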
4033                 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
4034
4035                 trans = btrfs_start_transaction(root, 0);
4036                 if (IS_ERR(trans))
4037                         return trans;
4038                 ret = btrfs_cond_migrate_bytes(root->fs_info,
4039                                                &root->fs_info->trans_block_rsv,
4040                                                num_bytes, 5);
4041                 if (ret) {
4042                         btrfs_end_transaction(trans, root);
4043                         return ERR_PTR(ret);
4044                 }
4045                 trans->block_rsv = &root->fs_info->trans_block_rsv;
4046                 trans->bytes_reserved = num_bytes;
4047         }
4048         return trans;
4049 }
4050
4051 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
4052 {
4053         struct btrfs_root *root = BTRFS_I(dir)->root;
4054         struct btrfs_trans_handle *trans;
4055         struct inode *inode = d_inode(dentry);
4056         int ret;
4057
4058         trans = __unlink_start_trans(dir);
4059         if (IS_ERR(trans))
4060                 return PTR_ERR(trans);
4061
4062         btrfs_record_unlink_dir(trans, dir, d_inode(dentry), 0);
4063
4064         ret = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
4065                                  dentry->d_name.name, dentry->d_name.len);
4066         if (ret)
4067                 goto out;
4068
4069         if (inode->i_nlink == 0) {
4070                 ret = btrfs_orphan_add(trans, inode);
4071                 if (ret)
4072                         goto out;
4073         }
4074
4075 out:
4076         btrfs_end_transaction(trans, root);
4077         btrfs_btree_balance_dirty(root);
4078         return ret;
4079 }
4080
4081 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
4082                         struct btrfs_root *root,
4083                         struct inode *dir, u64 objectid,
4084                         const char *name, int name_len)
4085 {
4086         struct btrfs_path *path;
4087         struct extent_buffer *leaf;
4088         struct btrfs_dir_item *di;
4089         struct btrfs_key key;
4090         u64 index;
4091         int ret;
4092         u64 dir_ino = btrfs_ino(dir);
4093
4094         path = btrfs_alloc_path();
4095         if (!path)
4096                 return -ENOMEM;
4097
4098         di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
4099                                    name, name_len, -1);
4100         if (IS_ERR_OR_NULL(di)) {
4101                 if (!di)
4102                         ret = -ENOENT;
4103                 else
4104                         ret = PTR_ERR(di);
4105                 goto out;
4106         }
4107
4108         leaf = path->nodes[0];
4109         btrfs_dir_item_key_to_cpu(leaf, di, &key);
4110         WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
4111         ret = btrfs_delete_one_dir_name(trans, root, path, di);
4112         if (ret) {
4113                 btrfs_abort_transaction(trans, root, ret);
4114                 goto out;
4115         }
4116         btrfs_release_path(path);
4117
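             /*
              * Drop the root ref/backref pair for the subvolume, which also
              * gives us its dir index.  If the ref is already gone (-ENOENT),
              * recover the index by searching for the dir index item by name.
              */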
4118         ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
4119                                  objectid, root->root_key.objectid,
4120                                  dir_ino, &index, name, name_len);
4121         if (ret < 0) {
4122                 if (ret != -ENOENT) {
4123                         btrfs_abort_transaction(trans, root, ret);
4124                         goto out;
4125                 }
4126                 di = btrfs_search_dir_index_item(root, path, dir_ino,
4127                                                  name, name_len);
4128                 if (IS_ERR_OR_NULL(di)) {
4129                         if (!di)
4130                                 ret = -ENOENT;
4131                         else
4132                                 ret = PTR_ERR(di);
4133                         btrfs_abort_transaction(trans, root, ret);
4134                         goto out;
4135                 }
4136
4137                 leaf = path->nodes[0];
4138                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4139                 btrfs_release_path(path);
4140                 index = key.offset;
4141         }
4142         btrfs_release_path(path);
4143
4144         ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
4145         if (ret) {
4146                 btrfs_abort_transaction(trans, root, ret);
4147                 goto out;
4148         }
4149
4150         btrfs_i_size_write(dir, dir->i_size - name_len * 2);
4151         inode_inc_iversion(dir);
4152         dir->i_mtime = dir->i_ctime = CURRENT_TIME;
4153         ret = btrfs_update_inode_fallback(trans, root, dir);
4154         if (ret)
4155                 btrfs_abort_transaction(trans, root, ret);
4156 out:
4157         btrfs_free_path(path);
4158         return ret;
4159 }
4160
4161 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
4162 {
4163         struct inode *inode = d_inode(dentry);
4164         int err = 0;
4165         struct btrfs_root *root = BTRFS_I(dir)->root;
4166         struct btrfs_trans_handle *trans;
4167
4168         if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
4169                 return -ENOTEMPTY;
4170         if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
4171                 return -EPERM;
4172
4173         trans = __unlink_start_trans(dir);
4174         if (IS_ERR(trans))
4175                 return PTR_ERR(trans);
4176
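             /*
              * This objectid marks the dummy directory inode used for a
              * subvolume whose root could not be looked up; remove it through
              * the subvolume unlink path instead of a regular unlink.
              */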
4177         if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
4178                 err = btrfs_unlink_subvol(trans, root, dir,
4179                                           BTRFS_I(inode)->location.objectid,
4180                                           dentry->d_name.name,
4181                                           dentry->d_name.len);
4182                 goto out;
4183         }
4184
4185         err = btrfs_orphan_add(trans, inode);
4186         if (err)
4187                 goto out;
4188
4189         /* now the directory is empty */
4190         err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
4191                                  dentry->d_name.name, dentry->d_name.len);
4192         if (!err)
4193                 btrfs_i_size_write(inode, 0);
4194 out:
4195         btrfs_end_transaction(trans, root);
4196         btrfs_btree_balance_dirty(root);
4197
4198         return err;
4199 }
4200
4201 static int truncate_space_check(struct btrfs_trans_handle *trans,
4202                                 struct btrfs_root *root,
4203                                 u64 bytes_deleted)
4204 {
4205         int ret;
4206
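             /*
              * Try to top up the transaction reserve to cover the metadata
              * updates implied by the bytes deleted so far, without flushing.
              * A failure tells the caller to stop and restart the transaction.
              */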
4207         bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
4208         ret = btrfs_block_rsv_add(root, &root->fs_info->trans_block_rsv,
4209                                   bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
4210         if (!ret)
4211                 trans->bytes_reserved += bytes_deleted;
4212         return ret;
4213
4214 }
4215
4216 /*
4217  * this can truncate away extent items, csum items and directory items.
4218  * It starts at a high offset and removes keys until it can't find
4219  * any higher than new_size
4220  *
4221  * csum items that cross the new i_size are truncated to the new size
4222  * as well.
4223  *
4224  * min_type is the minimum key type to truncate down to.  If set to 0, this
4225  * will kill all the items on this inode, including the INODE_ITEM_KEY.
4226  */
4227 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
4228                                struct btrfs_root *root,
4229                                struct inode *inode,
4230                                u64 new_size, u32 min_type)
4231 {
4232         struct btrfs_path *path;
4233         struct extent_buffer *leaf;
4234         struct btrfs_file_extent_item *fi;
4235         struct btrfs_key key;
4236         struct btrfs_key found_key;
4237         u64 extent_start = 0;
4238         u64 extent_num_bytes = 0;
4239         u64 extent_offset = 0;
4240         u64 item_end = 0;
4241         u64 last_size = new_size;
4242         u32 found_type = (u8)-1;
4243         int found_extent;
4244         int del_item;
4245         int pending_del_nr = 0;
4246         int pending_del_slot = 0;
4247         int extent_type = -1;
4248         int ret;
4249         int err = 0;
4250         u64 ino = btrfs_ino(inode);
4251         u64 bytes_deleted = 0;
4252         bool be_nice = 0;
4253         bool should_throttle = 0;
4254         bool should_end = 0;
4255
4256         BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
4257
4258         /*
4259          * for non-free-space inodes on reference-counted (REF_COWS) roots,
4260          * we want to back off from time to time
4261          */
4262         if (!btrfs_is_free_space_inode(inode) &&
4263             test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4264                 be_nice = 1;
4265
4266         path = btrfs_alloc_path();
4267         if (!path)
4268                 return -ENOMEM;
4269         path->reada = -1;
4270
4271         /*
4272          * We want to drop from the next block forward in case this new size is
4273          * not block aligned since we will be keeping the last block of the
4274          * extent just the way it is.
4275          */
4276         if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4277             root == root->fs_info->tree_root)
4278                 btrfs_drop_extent_cache(inode, ALIGN(new_size,
4279                                         root->sectorsize), (u64)-1, 0);
4280
4281         /*
4282          * This function is also used to drop the items in the log tree before
4283          * we relog the inode, so if root != BTRFS_I(inode)->root, it means
4284          * it is used to drop the logged items. So we shouldn't kill the delayed
4285          * items.
4286          */
4287         if (min_type == 0 && root == BTRFS_I(inode)->root)
4288                 btrfs_kill_delayed_inode_items(inode);
4289
4290         key.objectid = ino;
4291         key.offset = (u64)-1;
4292         key.type = (u8)-1;
4293
4294 search_again:
4295         /*
4296          * with a 16K leaf size and 128MB extents, you can actually queue
4297          * up a huge file in a single leaf.  Most of the time when
4298          * bytes_deleted is > 0, it will be huge by the time we get here.
4299          */
4300         if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
4301                 if (btrfs_should_end_transaction(trans, root)) {
4302                         err = -EAGAIN;
4303                         goto error;
4304                 }
4305         }
4306
4307
4308         path->leave_spinning = 1;
4309         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4310         if (ret < 0) {
4311                 err = ret;
4312                 goto out;
4313         }
4314
4315         if (ret > 0) {
4316                 /* there are no items in the tree for us to truncate, we're
4317                  * done
4318                  */
4319                 if (path->slots[0] == 0)
4320                         goto out;
4321                 path->slots[0]--;
4322         }
4323
4324         while (1) {
4325                 fi = NULL;
4326                 leaf = path->nodes[0];
4327                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4328                 found_type = found_key.type;
4329
4330                 if (found_key.objectid != ino)
4331                         break;
4332
4333                 if (found_type < min_type)
4334                         break;
4335
4336                 item_end = found_key.offset;
4337                 if (found_type == BTRFS_EXTENT_DATA_KEY) {
4338                         fi = btrfs_item_ptr(leaf, path->slots[0],
4339                                             struct btrfs_file_extent_item);
4340                         extent_type = btrfs_file_extent_type(leaf, fi);
4341                         if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4342                                 item_end +=
4343                                     btrfs_file_extent_num_bytes(leaf, fi);
4344                         } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4345                                 item_end += btrfs_file_extent_inline_len(leaf,
4346                                                          path->slots[0], fi);
4347                         }
4348                         item_end--;
4349                 }
4350                 if (found_type > min_type) {
4351                         del_item = 1;
4352                 } else {
4353                         if (item_end < new_size)
4354                                 break;
4355                         if (found_key.offset >= new_size)
4356                                 del_item = 1;
4357                         else
4358                                 del_item = 0;
4359                 }
4360                 found_extent = 0;
4361                 /* FIXME, shrink the extent if the ref count is only 1 */
4362                 if (found_type != BTRFS_EXTENT_DATA_KEY)
4363                         goto delete;
4364
4365                 if (del_item)
4366                         last_size = found_key.offset;
4367                 else
4368                         last_size = new_size;
4369
4370                 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
4371                         u64 num_dec;
4372                         extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
4373                         if (!del_item) {
4374                                 u64 orig_num_bytes =
4375                                         btrfs_file_extent_num_bytes(leaf, fi);
4376                                 extent_num_bytes = ALIGN(new_size -
4377                                                 found_key.offset,
4378                                                 root->sectorsize);
4379                                 btrfs_set_file_extent_num_bytes(leaf, fi,
4380                                                          extent_num_bytes);
4381                                 num_dec = (orig_num_bytes -
4382                                            extent_num_bytes);
4383                                 if (test_bit(BTRFS_ROOT_REF_COWS,
4384                                              &root->state) &&
4385                                     extent_start != 0)
4386                                         inode_sub_bytes(inode, num_dec);
4387                                 btrfs_mark_buffer_dirty(leaf);
4388                         } else {
4389                                 extent_num_bytes =
4390                                         btrfs_file_extent_disk_num_bytes(leaf,
4391                                                                          fi);
4392                                 extent_offset = found_key.offset -
4393                                         btrfs_file_extent_offset(leaf, fi);
4394
4395                                 /* FIXME blocksize != 4096 */
4396                                 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
4397                                 if (extent_start != 0) {
4398                                         found_extent = 1;
4399                                         if (test_bit(BTRFS_ROOT_REF_COWS,
4400                                                      &root->state))
4401                                                 inode_sub_bytes(inode, num_dec);
4402                                 }
4403                         }
4404                 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
4405                         /*
4406                          * we can't truncate inline items that have had
4407                          * special encodings
4408                          */
4409                         if (!del_item &&
4410                             btrfs_file_extent_compression(leaf, fi) == 0 &&
4411                             btrfs_file_extent_encryption(leaf, fi) == 0 &&
4412                             btrfs_file_extent_other_encoding(leaf, fi) == 0) {
4413                                 u32 size = new_size - found_key.offset;
4414
4415                                 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
4416                                         inode_sub_bytes(inode, item_end + 1 -
4417                                                         new_size);
4418
4419                                 /*
4420                                  * update the ram bytes to properly reflect
4421                                  * the new size of our item
4422                                  */
4423                                 btrfs_set_file_extent_ram_bytes(leaf, fi, size);
4424                                 size =
4425                                     btrfs_file_extent_calc_inline_size(size);
4426                                 btrfs_truncate_item(root, path, size, 1);
4427                         } else if (test_bit(BTRFS_ROOT_REF_COWS,
4428                                             &root->state)) {
4429                                 inode_sub_bytes(inode, item_end + 1 -
4430                                                 found_key.offset);
4431                         }
4432                 }
4433 delete:
4434                 if (del_item) {
4435                         if (!pending_del_nr) {
4436                                 /* no pending yet, add ourselves */
4437                                 pending_del_slot = path->slots[0];
4438                                 pending_del_nr = 1;
4439                         } else if (pending_del_nr &&
4440                                    path->slots[0] + 1 == pending_del_slot) {
4441                                 /* hop on the pending chunk */
4442                                 pending_del_nr++;
4443                                 pending_del_slot = path->slots[0];
4444                         } else {
4445                                 BUG();
4446                         }
4447                 } else {
4448                         break;
4449                 }
4450                 should_throttle = 0;
4451
4452                 if (found_extent &&
4453                     (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
4454                      root == root->fs_info->tree_root)) {
4455                         btrfs_set_path_blocking(path);
4456                         bytes_deleted += extent_num_bytes;
4457                         ret = btrfs_free_extent(trans, root, extent_start,
4458                                                 extent_num_bytes, 0,
4459                                                 btrfs_header_owner(leaf),
4460                                                 ino, extent_offset, 0);
4461                         BUG_ON(ret);
4462                         if (btrfs_should_throttle_delayed_refs(trans, root))
4463                                 btrfs_async_run_delayed_refs(root,
4464                                         trans->delayed_ref_updates * 2, 0);
4465                         if (be_nice) {
4466                                 if (truncate_space_check(trans, root,
4467                                                          extent_num_bytes)) {
4468                                         should_end = 1;
4469                                 }
4470                                 if (btrfs_should_throttle_delayed_refs(trans,
4471                                                                        root)) {
4472                                         should_throttle = 1;
4473                                 }
4474                         }
4475                 }
4476
4477                 if (found_type == BTRFS_INODE_ITEM_KEY)
4478                         break;
4479
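                     /*
                      * Flush the batch of pending deletions when we can no
                      * longer extend it, or when we need to throttle delayed
                      * refs or restart the transaction.
                      */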
4480                 if (path->slots[0] == 0 ||
4481                     path->slots[0] != pending_del_slot ||
4482                     should_throttle || should_end) {
4483                         if (pending_del_nr) {
4484                                 ret = btrfs_del_items(trans, root, path,
4485                                                 pending_del_slot,
4486                                                 pending_del_nr);
4487                                 if (ret) {
4488                                         btrfs_abort_transaction(trans,
4489                                                                 root, ret);
4490                                         goto error;
4491                                 }
4492                                 pending_del_nr = 0;
4493                         }
4494                         btrfs_release_path(path);
4495                         if (should_throttle) {
4496                                 unsigned long updates = trans->delayed_ref_updates;
4497                                 if (updates) {
4498                                         trans->delayed_ref_updates = 0;
4499                                         ret = btrfs_run_delayed_refs(trans, root, updates * 2);
4500                                         if (ret && !err)
4501                                                 err = ret;
4502                                 }
4503                         }
4504                         /*
4505                          * if we failed to refill our space rsv, bail out
4506                          * and let the transaction restart
4507                          */
4508                         if (should_end) {
4509                                 err = -EAGAIN;
4510                                 goto error;
4511                         }
4512                         goto search_again;
4513                 } else {
4514                         path->slots[0]--;
4515                 }
4516         }
4517 out:
4518         if (pending_del_nr) {
4519                 ret = btrfs_del_items(trans, root, path, pending_del_slot,
4520                                       pending_del_nr);
4521                 if (ret)
4522                         btrfs_abort_transaction(trans, root, ret);
4523         }
4524 error:
4525         if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4526                 btrfs_ordered_update_i_size(inode, last_size, NULL);
4527
4528         btrfs_free_path(path);
4529
4530         if (be_nice && bytes_deleted > 32 * 1024 * 1024) {
4531                 unsigned long updates = trans->delayed_ref_updates;
4532                 if (updates) {
4533                         trans->delayed_ref_updates = 0;
4534                         ret = btrfs_run_delayed_refs(trans, root, updates * 2);
4535                         if (ret && !err)
4536                                 err = ret;
4537                 }
4538         }
4539         return err;
4540 }
4541
4542 /*
4543  * btrfs_truncate_page - read, zero a chunk and write a page
4544  * @inode - inode that we're zeroing
4545  * @from - the offset to start zeroing
4546  * @len - the length to zero, or 0 to zero everything from the offset to
4547  *      the end of the page
4548  * @front - zero up to the offset instead of from the offset on
4549  *
4550  * This will find the page for the "from" offset, cow the page and zero the
4551  * part we want to zero.  This is used with truncate and hole punching.
4552  */
4553 int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
4554                         int front)
4555 {
4556         struct address_space *mapping = inode->i_mapping;
4557         struct btrfs_root *root = BTRFS_I(inode)->root;
4558         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4559         struct btrfs_ordered_extent *ordered;
4560         struct extent_state *cached_state = NULL;
4561         char *kaddr;
4562         u32 blocksize = root->sectorsize;
4563         pgoff_t index = from >> PAGE_CACHE_SHIFT;
4564         unsigned offset = from & (PAGE_CACHE_SIZE-1);
4565         struct page *page;
4566         gfp_t mask = btrfs_alloc_write_mask(mapping);
4567         int ret = 0;
4568         u64 page_start;
4569         u64 page_end;
4570
4571         if ((offset & (blocksize - 1)) == 0 &&
4572             (!len || ((len & (blocksize - 1)) == 0)))
4573                 goto out;
4574         ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
4575         if (ret)
4576                 goto out;
4577
4578 again:
4579         page = find_or_create_page(mapping, index, mask);
4580         if (!page) {
4581                 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
4582                 ret = -ENOMEM;
4583                 goto out;
4584         }
4585
4586         page_start = page_offset(page);
4587         page_end = page_start + PAGE_CACHE_SIZE - 1;
4588
4589         if (!PageUptodate(page)) {
4590                 ret = btrfs_readpage(NULL, page);
4591                 lock_page(page);
4592                 if (page->mapping != mapping) {
4593                         unlock_page(page);
4594                         page_cache_release(page);
4595                         goto again;
4596                 }
4597                 if (!PageUptodate(page)) {
4598                         ret = -EIO;
4599                         goto out_unlock;
4600                 }
4601         }
4602         wait_on_page_writeback(page);
4603
4604         lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
4605         set_page_extent_mapped(page);
4606
4607         ordered = btrfs_lookup_ordered_extent(inode, page_start);
4608         if (ordered) {
4609                 unlock_extent_cached(io_tree, page_start, page_end,
4610                                      &cached_state, GFP_NOFS);
4611                 unlock_page(page);
4612                 page_cache_release(page);
4613                 btrfs_start_ordered_extent(inode, ordered, 1);
4614                 btrfs_put_ordered_extent(ordered);
4615                 goto again;
4616         }
4617
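             /*
              * Clear any stale dirty/delalloc/accounting bits on this page so
              * that the delalloc we set below does not get double accounted.
              */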
4618         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
4619                           EXTENT_DIRTY | EXTENT_DELALLOC |
4620                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
4621                           0, 0, &cached_state, GFP_NOFS);
4622
4623         ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
4624                                         &cached_state);
4625         if (ret) {
4626                 unlock_extent_cached(io_tree, page_start, page_end,
4627                                      &cached_state, GFP_NOFS);
4628                 goto out_unlock;
4629         }
4630
4631         if (offset != PAGE_CACHE_SIZE) {
4632                 if (!len)
4633                         len = PAGE_CACHE_SIZE - offset;
4634                 kaddr = kmap(page);
4635                 if (front)
4636                         memset(kaddr, 0, offset);
4637                 else
4638                         memset(kaddr + offset, 0, len);
4639                 flush_dcache_page(page);
4640                 kunmap(page);
4641         }
4642         ClearPageChecked(page);
4643         set_page_dirty(page);
4644         unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
4645                              GFP_NOFS);
4646
4647 out_unlock:
4648         if (ret)
4649                 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
4650         unlock_page(page);
4651         page_cache_release(page);
4652 out:
4653         return ret;
4654 }
4655
4656 static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
4657                              u64 offset, u64 len)
4658 {
4659         struct btrfs_trans_handle *trans;
4660         int ret;
4661
4662         /*
4663          * Still need to make sure the inode looks like it's been updated so
4664          * that any holes get logged if we fsync.
4665          */
4666         if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) {
4667                 BTRFS_I(inode)->last_trans = root->fs_info->generation;
4668                 BTRFS_I(inode)->last_sub_trans = root->log_transid;
4669                 BTRFS_I(inode)->last_log_commit = root->last_log_commit;
4670                 return 0;
4671         }
4672
4673         /*
4674          * 1 - for the one we're dropping
4675          * 1 - for the one we're adding
4676          * 1 - for updating the inode.
4677          */
4678         trans = btrfs_start_transaction(root, 3);
4679         if (IS_ERR(trans))
4680                 return PTR_ERR(trans);
4681
4682         ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
4683         if (ret) {
4684                 btrfs_abort_transaction(trans, root, ret);
4685                 btrfs_end_transaction(trans, root);
4686                 return ret;
4687         }
4688
4689         ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
4690                                        0, 0, len, 0, len, 0, 0, 0);
4691         if (ret)
4692                 btrfs_abort_transaction(trans, root, ret);
4693         else
4694                 btrfs_update_inode(trans, root, inode);
4695         btrfs_end_transaction(trans, root);
4696         return ret;
4697 }
4698
4699 /*
4700  * This function puts in dummy file extents for the area we're creating a hole
4701  * for.  So if we are truncating this file to a larger size we need to insert
4702  * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE for
4703  * the range between oldsize and size.
4704  */
4705 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
4706 {
4707         struct btrfs_root *root = BTRFS_I(inode)->root;
4708         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4709         struct extent_map *em = NULL;
4710         struct extent_state *cached_state = NULL;
4711         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4712         u64 hole_start = ALIGN(oldsize, root->sectorsize);
4713         u64 block_end = ALIGN(size, root->sectorsize);
4714         u64 last_byte;
4715         u64 cur_offset;
4716         u64 hole_size;
4717         int err = 0;
4718
4719         /*
4720          * If our size started in the middle of a page we need to zero out the
4721          * rest of the page before we expand the i_size, otherwise we could
4722          * expose stale data.
4723          */
4724         err = btrfs_truncate_page(inode, oldsize, 0, 0);
4725         if (err)
4726                 return err;
4727
4728         if (size <= hole_start)
4729                 return 0;
4730
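             /*
              * Lock the range we are about to fill with hole extents and wait
              * out any ordered extents that overlap it.
              */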
4731         while (1) {
4732                 struct btrfs_ordered_extent *ordered;
4733
4734                 lock_extent_bits(io_tree, hole_start, block_end - 1, 0,
4735                                  &cached_state);
4736                 ordered = btrfs_lookup_ordered_range(inode, hole_start,
4737                                                      block_end - hole_start);
4738                 if (!ordered)
4739                         break;
4740                 unlock_extent_cached(io_tree, hole_start, block_end - 1,
4741                                      &cached_state, GFP_NOFS);
4742                 btrfs_start_ordered_extent(inode, ordered, 1);
4743                 btrfs_put_ordered_extent(ordered);
4744         }
4745
4746         cur_offset = hole_start;
4747         while (1) {
4748                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
4749                                 block_end - cur_offset, 0);
4750                 if (IS_ERR(em)) {
4751                         err = PTR_ERR(em);
4752                         em = NULL;
4753                         break;
4754                 }
4755                 last_byte = min(extent_map_end(em), block_end);
4756                 last_byte = ALIGN(last_byte, root->sectorsize);
4757                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
4758                         struct extent_map *hole_em;
4759                         hole_size = last_byte - cur_offset;
4760
4761                         err = maybe_insert_hole(root, inode, cur_offset,
4762                                                 hole_size);
4763                         if (err)
4764                                 break;
4765                         btrfs_drop_extent_cache(inode, cur_offset,
4766                                                 cur_offset + hole_size - 1, 0);
4767                         hole_em = alloc_extent_map();
4768                         if (!hole_em) {
4769                                 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4770                                         &BTRFS_I(inode)->runtime_flags);
4771                                 goto next;
4772                         }
4773                         hole_em->start = cur_offset;
4774                         hole_em->len = hole_size;
4775                         hole_em->orig_start = cur_offset;
4776
4777                         hole_em->block_start = EXTENT_MAP_HOLE;
4778                         hole_em->block_len = 0;
4779                         hole_em->orig_block_len = 0;
4780                         hole_em->ram_bytes = hole_size;
4781                         hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
4782                         hole_em->compress_type = BTRFS_COMPRESS_NONE;
4783                         hole_em->generation = root->fs_info->generation;
4784
4785                         while (1) {
4786                                 write_lock(&em_tree->lock);
4787                                 err = add_extent_mapping(em_tree, hole_em, 1);
4788                                 write_unlock(&em_tree->lock);
4789                                 if (err != -EEXIST)
4790                                         break;
4791                                 btrfs_drop_extent_cache(inode, cur_offset,
4792                                                         cur_offset +
4793                                                         hole_size - 1, 0);
4794                         }
4795                         free_extent_map(hole_em);
4796                 }
4797 next:
4798                 free_extent_map(em);
4799                 em = NULL;
4800                 cur_offset = last_byte;
4801                 if (cur_offset >= block_end)
4802                         break;
4803         }
4804         free_extent_map(em);
4805         unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
4806                              GFP_NOFS);
4807         return err;
4808 }
4809
4810 static int wait_snapshoting_atomic_t(atomic_t *a)
4811 {
4812         schedule();
4813         return 0;
4814 }
4815
4816 static void wait_for_snapshot_creation(struct btrfs_root *root)
4817 {
4818         while (true) {
4819                 int ret;
4820
4821                 ret = btrfs_start_write_no_snapshoting(root);
4822                 if (ret)
4823                         break;
4824                 wait_on_atomic_t(&root->will_be_snapshoted,
4825                                  wait_snapshoting_atomic_t,
4826                                  TASK_UNINTERRUPTIBLE);
4827         }
4828 }
4829
4830 static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4831 {
4832         struct btrfs_root *root = BTRFS_I(inode)->root;
4833         struct btrfs_trans_handle *trans;
4834         loff_t oldsize = i_size_read(inode);
4835         loff_t newsize = attr->ia_size;
4836         int mask = attr->ia_valid;
4837         int ret;
4838
4839         /*
4840          * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
4841          * special case where we need to update the times despite not having
4842          * these flags set.  For all other operations the VFS sets these flags
4843          * explicitly if it wants a timestamp update.
4844          */
4845         if (newsize != oldsize) {
4846                 inode_inc_iversion(inode);
4847                 if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
4848                         inode->i_ctime = inode->i_mtime =
4849                                 current_fs_time(inode->i_sb);
4850         }
4851
4852         if (newsize > oldsize) {
4853                 truncate_pagecache(inode, newsize);
4854                 /*
4855                  * Don't do an expanding truncate while snapshotting is ongoing.
4856                  * This is to ensure the snapshot captures a fully consistent
4857                  * state of this file - if the snapshot captures this expanding
4858                  * truncation, it must capture all writes that happened before
4859                  * this truncation.
4860                  */
4861                 wait_for_snapshot_creation(root);
4862                 ret = btrfs_cont_expand(inode, oldsize, newsize);
4863                 if (ret) {
4864                         btrfs_end_write_no_snapshoting(root);
4865                         return ret;
4866                 }
4867
4868                 trans = btrfs_start_transaction(root, 1);
4869                 if (IS_ERR(trans)) {
4870                         btrfs_end_write_no_snapshoting(root);
4871                         return PTR_ERR(trans);
4872                 }
4873
4874                 i_size_write(inode, newsize);
4875                 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
4876                 ret = btrfs_update_inode(trans, root, inode);
4877                 btrfs_end_write_no_snapshoting(root);
4878                 btrfs_end_transaction(trans, root);
4879         } else {
4880
4881                 /*
4882                  * We're truncating a file that used to have good data down to
4883                  * zero. Make sure it gets into the ordered flush list so that
4884                  * any new writes get down to disk quickly.
4885                  */
4886                 if (newsize == 0)
4887                         set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
4888                                 &BTRFS_I(inode)->runtime_flags);
4889
4890                 /*
4891                  * 1 for the orphan item we're going to add
4892                  * 1 for the orphan item deletion.
4893                  */
4894                 trans = btrfs_start_transaction(root, 2);
4895                 if (IS_ERR(trans))
4896                         return PTR_ERR(trans);
4897
4898                 /*
4899                  * We need to do this in case we fail at _any_ point during the
4900                  * actual truncate.  Once we do the truncate_setsize we could
4901                  * invalidate pages, which forces any outstanding ordered io to
4902                  * be instantly completed, which will give us extents that need
4903                  * to be truncated.  If we fail to get an orphan item down we
4904                  * could have left-over extents that were never meant to live,
4905                  * so we need to guarantee from this point on that everything
4906                  * will be consistent.
4907                  */
4908                 ret = btrfs_orphan_add(trans, inode);
4909                 btrfs_end_transaction(trans, root);
4910                 if (ret)
4911                         return ret;
4912
4913                 /* we don't support swapfiles, so vmtruncate shouldn't fail */
4914                 truncate_setsize(inode, newsize);
4915
4916                 /* Disable nonlocked read DIO to avoid the endless truncate */
4917                 btrfs_inode_block_unlocked_dio(inode);
4918                 inode_dio_wait(inode);
4919                 btrfs_inode_resume_unlocked_dio(inode);
4920
4921                 ret = btrfs_truncate(inode);
4922                 if (ret && inode->i_nlink) {
4923                         int err;
4924
4925                         /*
4926                          * failed to truncate: disk_i_size is only adjusted down
4927                          * as we remove extents, so it should represent the true
4928                          * size of the inode; reset the in-memory size and delete
4929                          * our orphan entry.
4930                          */
4931                         trans = btrfs_join_transaction(root);
4932                         if (IS_ERR(trans)) {
4933                                 btrfs_orphan_del(NULL, inode);
4934                                 return ret;
4935                         }
4936                         i_size_write(inode, BTRFS_I(inode)->disk_i_size);
4937                         err = btrfs_orphan_del(trans, inode);
4938                         if (err)
4939                                 btrfs_abort_transaction(trans, root, err);
4940                         btrfs_end_transaction(trans, root);
4941                 }
4942         }
4943
4944         return ret;
4945 }
4946
4947 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
4948 {
4949         struct inode *inode = d_inode(dentry);
4950         struct btrfs_root *root = BTRFS_I(inode)->root;
4951         int err;
4952
4953         if (btrfs_root_readonly(root))
4954                 return -EROFS;
4955
4956         err = inode_change_ok(inode, attr);
4957         if (err)
4958                 return err;
4959
4960         if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
4961                 err = btrfs_setsize(inode, attr);
4962                 if (err)
4963                         return err;
4964         }
4965
4966         if (attr->ia_valid) {
4967                 setattr_copy(inode, attr);
4968                 inode_inc_iversion(inode);
4969                 err = btrfs_dirty_inode(inode);
4970
4971                 if (!err && attr->ia_valid & ATTR_MODE)
4972                         err = posix_acl_chmod(inode, inode->i_mode);
4973         }
4974
4975         return err;
4976 }
4977
4978 /*
4979  * While truncating the inode pages during eviction, we get the VFS calling
4980  * btrfs_invalidatepage() against each page of the inode. This is slow because
4981  * the calls to btrfs_invalidatepage() result in a huge amount of calls to
4982  * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
4983  * extent_state structures over and over, wasting lots of time.
4984  *
4985  * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
4986  * those expensive operations on a per page basis and do only the ordered io
4987  * finishing, while we release here the extent_map and extent_state structures,
4988  * without the excessive merging and splitting.
4989  */
4990 static void evict_inode_truncate_pages(struct inode *inode)
4991 {
4992         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4993         struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
4994         struct rb_node *node;
4995
4996         ASSERT(inode->i_state & I_FREEING);
4997         truncate_inode_pages_final(&inode->i_data);
4998
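             /*
              * Drop every extent_map cached for this inode, rescheduling
              * periodically so we don't hog the CPU on inodes with many
              * extents.
              */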
4999         write_lock(&map_tree->lock);
5000         while (!RB_EMPTY_ROOT(&map_tree->map)) {
5001                 struct extent_map *em;
5002
5003                 node = rb_first(&map_tree->map);
5004                 em = rb_entry(node, struct extent_map, rb_node);
5005                 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
5006                 clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
5007                 remove_extent_mapping(map_tree, em);
5008                 free_extent_map(em);
5009                 if (need_resched()) {
5010                         write_unlock(&map_tree->lock);
5011                         cond_resched();
5012                         write_lock(&map_tree->lock);
5013                 }
5014         }
5015         write_unlock(&map_tree->lock);
5016
5017         /*
5018          * Keep looping until we have no more ranges in the io tree.
5019          * We can have ongoing bios started by readpages (called from readahead)
5020          * that have their endio callback (extent_io.c:end_bio_extent_readpage)
5021          * still in progress (they unlocked the pages in the bio but did not
5022          * yet unlock the ranges in the io tree). Therefore this means some
5023          * ranges can still be locked and eviction started because before
5024          * submitting those bios, which are executed by a separate task (work
5025          * queue kthread), inode references (inode->i_count) were not taken
5026          * (which would be dropped in the end io callback of each bio).
5027          * Therefore here we effectively end up waiting for those bios and
5028          * anyone else holding locked ranges without having bumped the inode's
5029          * reference count - if we don't do it, when they access the inode's
5030          * io_tree to unlock a range it may be too late, leading to a
5031          * use-after-free issue.
5032          */
5033         spin_lock(&io_tree->lock);
5034         while (!RB_EMPTY_ROOT(&io_tree->state)) {
5035                 struct extent_state *state;
5036                 struct extent_state *cached_state = NULL;
5037                 u64 start;
5038                 u64 end;
5039
5040                 node = rb_first(&io_tree->state);
5041                 state = rb_entry(node, struct extent_state, rb_node);
5042                 start = state->start;
5043                 end = state->end;
5044                 spin_unlock(&io_tree->lock);
5045
5046                 lock_extent_bits(io_tree, start, end, 0, &cached_state);
5047                 clear_extent_bit(io_tree, start, end,
5048                                  EXTENT_LOCKED | EXTENT_DIRTY |
5049                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
5050                                  EXTENT_DEFRAG, 1, 1,
5051                                  &cached_state, GFP_NOFS);
5052
5053                 cond_resched();
5054                 spin_lock(&io_tree->lock);
5055         }
5056         spin_unlock(&io_tree->lock);
5057 }
5058
5059 void btrfs_evict_inode(struct inode *inode)
5060 {
5061         struct btrfs_trans_handle *trans;
5062         struct btrfs_root *root = BTRFS_I(inode)->root;
5063         struct btrfs_block_rsv *rsv, *global_rsv;
5064         int steal_from_global = 0;
5065         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
5066         int ret;
5067
5068         trace_btrfs_inode_evict(inode);
5069
5070         evict_inode_truncate_pages(inode);
5071
5072         if (inode->i_nlink &&
5073             ((btrfs_root_refs(&root->root_item) != 0 &&
5074               root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
5075              btrfs_is_free_space_inode(inode)))
5076                 goto no_delete;
5077
5078         if (is_bad_inode(inode)) {
5079                 btrfs_orphan_del(NULL, inode);
5080                 goto no_delete;
5081         }
5082         /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
5083         if (!special_file(inode->i_mode))
5084                 btrfs_wait_ordered_range(inode, 0, (u64)-1);
5085
5086         btrfs_free_io_failure_record(inode, 0, (u64)-1);
5087
5088         if (root->fs_info->log_root_recovering) {
5089                 BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
5090                                  &BTRFS_I(inode)->runtime_flags));
5091                 goto no_delete;
5092         }
5093
5094         if (inode->i_nlink > 0) {
5095                 BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
5096                        root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
5097                 goto no_delete;
5098         }
5099
5100         ret = btrfs_commit_inode_delayed_inode(inode);
5101         if (ret) {
5102                 btrfs_orphan_del(NULL, inode);
5103                 goto no_delete;
5104         }
5105
5106         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
5107         if (!rsv) {
5108                 btrfs_orphan_del(NULL, inode);
5109                 goto no_delete;
5110         }
5111         rsv->size = min_size;
5112         rsv->failfast = 1;
5113         global_rsv = &root->fs_info->global_block_rsv;
5114
5115         btrfs_i_size_write(inode, 0);
5116
5117         /*
5118          * This is a bit simpler than btrfs_truncate since we've already
5119          * reserved our space for our orphan item in the unlink, so we just
5120          * need to reserve some slack space in case we add bytes and update
5121          * the inode item when doing the truncate.
5122          */
5123         while (1) {
5124                 ret = btrfs_block_rsv_refill(root, rsv, min_size,
5125                                              BTRFS_RESERVE_FLUSH_LIMIT);
5126
5127                 /*
5128                  * Try and steal from the global reserve since we will
5129                  * likely not use this space anyway; we want to try as
5130                  * hard as possible to get this to work.
5131                  */
5132                 if (ret)
5133                         steal_from_global++;
5134                 else
5135                         steal_from_global = 0;
5136                 ret = 0;
5137
5138                 /*
5139                  * steal_from_global == 0: we reserved stuff, hooray!
5140                  * steal_from_global == 1: we didn't reserve stuff, boo!
5141                  * steal_from_global == 2: we've committed, still not a lot of
5142                  * room but maybe we'll have room in the global reserve this
5143                  * time.
5144                  * steal_from_global == 3: abandon all hope!
5145                  */
5146                 if (steal_from_global > 2) {
5147                         btrfs_warn(root->fs_info,
5148                                 "Could not get space for a delete, will truncate on mount %d",
5149                                 ret);
5150                         btrfs_orphan_del(NULL, inode);
5151                         btrfs_free_block_rsv(root, rsv);
5152                         goto no_delete;
5153                 }
5154
5155                 trans = btrfs_join_transaction(root);
5156                 if (IS_ERR(trans)) {
5157                         btrfs_orphan_del(NULL, inode);
5158                         btrfs_free_block_rsv(root, rsv);
5159                         goto no_delete;
5160                 }
5161
5162                 /*
5163                  * We can't just steal from the global reserve, we need to make
5164                  * sure there is room to do it, if not we need to commit and try
5165                  * again.
5166                  */
5167                 if (steal_from_global) {
5168                         if (!btrfs_check_space_for_delayed_refs(trans, root))
5169                                 ret = btrfs_block_rsv_migrate(global_rsv, rsv,
5170                                                               min_size);
5171                         else
5172                                 ret = -ENOSPC;
5173                 }
5174
5175                 /*
5176                  * Couldn't steal from the global reserve, we have too much
5177                  * pending stuff built up, commit the transaction and try it
5178                  * again.
5179                  */
5180                 if (ret) {
5181                         ret = btrfs_commit_transaction(trans, root);
5182                         if (ret) {
5183                                 btrfs_orphan_del(NULL, inode);
5184                                 btrfs_free_block_rsv(root, rsv);
5185                                 goto no_delete;
5186                         }
5187                         continue;
5188                 } else {
5189                         steal_from_global = 0;
5190                 }
5191
5192                 trans->block_rsv = rsv;
5193
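                     /*
                      * Delete the inode's items.  -ENOSPC or -EAGAIN means the
                      * reservation ran dry; end this transaction and loop to
                      * refill it and try again.
                      */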
5194                 ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
5195                 if (ret != -ENOSPC && ret != -EAGAIN)
5196                         break;
5197
5198                 trans->block_rsv = &root->fs_info->trans_block_rsv;
5199                 btrfs_end_transaction(trans, root);
5200                 trans = NULL;
5201                 btrfs_btree_balance_dirty(root);
5202         }
5203
5204         btrfs_free_block_rsv(root, rsv);
5205
5206         /*
5207          * Errors here aren't a big deal; they just mean we leave orphan items
5208          * in the tree.  They will be cleaned up on the next mount.
5209          */
5210         if (ret == 0) {
5211                 trans->block_rsv = root->orphan_block_rsv;
5212                 btrfs_orphan_del(trans, inode);
5213         } else {
5214                 btrfs_orphan_del(NULL, inode);
5215         }
5216
5217         trans->block_rsv = &root->fs_info->trans_block_rsv;
5218         if (!(root == root->fs_info->tree_root ||
5219               root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
5220                 btrfs_return_ino(root, btrfs_ino(inode));
5221
5222         btrfs_end_transaction(trans, root);
5223         btrfs_btree_balance_dirty(root);
5224 no_delete:
5225         btrfs_remove_delayed_node(inode);
5226         clear_inode(inode);
5227         return;
5228 }
5229
5230 /*
5231  * this returns the key found in the dir entry in the location pointer.
5232  * If no dir entries were found, location->objectid is 0.
5233  */
5234 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
5235                                struct btrfs_key *location)
5236 {
5237         const char *name = dentry->d_name.name;
5238         int namelen = dentry->d_name.len;
5239         struct btrfs_dir_item *di;
5240         struct btrfs_path *path;
5241         struct btrfs_root *root = BTRFS_I(dir)->root;
5242         int ret = 0;
5243
5244         path = btrfs_alloc_path();
5245         if (!path)
5246                 return -ENOMEM;
5247
5248         di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
5249                                     namelen, 0);
5250         if (IS_ERR(di))
5251                 ret = PTR_ERR(di);
5252
5253         if (IS_ERR_OR_NULL(di))
5254                 goto out_err;
5255
5256         btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
5257 out:
5258         btrfs_free_path(path);
5259         return ret;
5260 out_err:
5261         location->objectid = 0;
5262         goto out;
5263 }
5264
5265 /*
5266  * when we hit a tree root in a directory, the btrfs part of the inode
5267  * needs to be changed to reflect the root directory of the tree root.  This
5268  * is kind of like crossing a mount point.
5269  */
5270 static int fixup_tree_root_location(struct btrfs_root *root,
5271                                     struct inode *dir,
5272                                     struct dentry *dentry,
5273                                     struct btrfs_key *location,
5274                                     struct btrfs_root **sub_root)
5275 {
5276         struct btrfs_path *path;
5277         struct btrfs_root *new_root;
5278         struct btrfs_root_ref *ref;
5279         struct extent_buffer *leaf;
5280         struct btrfs_key key;
5281         int ret;
5282         int err = 0;
5283
5284         path = btrfs_alloc_path();
5285         if (!path) {
5286                 err = -ENOMEM;
5287                 goto out;
5288         }
5289
5290         err = -ENOENT;
5291         key.objectid = BTRFS_I(dir)->root->root_key.objectid;
5292         key.type = BTRFS_ROOT_REF_KEY;
5293         key.offset = location->objectid;
5294
5295         ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path,
5296                                 0, 0);
5297         if (ret) {
5298                 if (ret < 0)
5299                         err = ret;
5300                 goto out;
5301         }
5302
5303         leaf = path->nodes[0];
5304         ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
5305         if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
5306             btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
5307                 goto out;
5308
5309         ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
5310                                    (unsigned long)(ref + 1),
5311                                    dentry->d_name.len);
5312         if (ret)
5313                 goto out;
5314
5315         btrfs_release_path(path);
5316
5317         new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
5318         if (IS_ERR(new_root)) {
5319                 err = PTR_ERR(new_root);
5320                 goto out;
5321         }
5322
5323         *sub_root = new_root;
5324         location->objectid = btrfs_root_dirid(&new_root->root_item);
5325         location->type = BTRFS_INODE_ITEM_KEY;
5326         location->offset = 0;
5327         err = 0;
5328 out:
5329         btrfs_free_path(path);
5330         return err;
5331 }
5332
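/*
 * Track this inode in the per-root red-black tree, keyed by inode number.
 * If an inode with the same number is already cached it must be on its way
 * out (I_FREEING/I_WILL_FREE) and is simply replaced.
 */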
5333 static void inode_tree_add(struct inode *inode)
5334 {
5335         struct btrfs_root *root = BTRFS_I(inode)->root;
5336         struct btrfs_inode *entry;
5337         struct rb_node **p;
5338         struct rb_node *parent;
5339         struct rb_node *new = &BTRFS_I(inode)->rb_node;
5340         u64 ino = btrfs_ino(inode);
5341
5342         if (inode_unhashed(inode))
5343                 return;
5344         parent = NULL;
5345         spin_lock(&root->inode_lock);
5346         p = &root->inode_tree.rb_node;
5347         while (*p) {
5348                 parent = *p;
5349                 entry = rb_entry(parent, struct btrfs_inode, rb_node);
5350
5351                 if (ino < btrfs_ino(&entry->vfs_inode))
5352                         p = &parent->rb_left;
5353                 else if (ino > btrfs_ino(&entry->vfs_inode))
5354                         p = &parent->rb_right;
5355                 else {
5356                         WARN_ON(!(entry->vfs_inode.i_state &
5357                                   (I_WILL_FREE | I_FREEING)));
5358                         rb_replace_node(parent, new, &root->inode_tree);
5359                         RB_CLEAR_NODE(parent);
5360                         spin_unlock(&root->inode_lock);
5361                         return;
5362                 }
5363         }
5364         rb_link_node(new, parent, p);
5365         rb_insert_color(new, &root->inode_tree);
5366         spin_unlock(&root->inode_lock);
5367 }
5368
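/*
 * Drop this inode from the per-root red-black tree.  If that leaves the
 * tree empty and the root has no references left, hand the root over to
 * btrfs_add_dead_root() for cleanup.
 */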
5369 static void inode_tree_del(struct inode *inode)
5370 {
5371         struct btrfs_root *root = BTRFS_I(inode)->root;
5372         int empty = 0;
5373
5374         spin_lock(&root->inode_lock);
5375         if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
5376                 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
5377                 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
5378                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5379         }
5380         spin_unlock(&root->inode_lock);
5381
5382         if (empty && btrfs_root_refs(&root->root_item) == 0) {
5383                 synchronize_srcu(&root->fs_info->subvol_srcu);
5384                 spin_lock(&root->inode_lock);
5385                 empty = RB_EMPTY_ROOT(&root->inode_tree);
5386                 spin_unlock(&root->inode_lock);
5387                 if (empty)
5388                         btrfs_add_dead_root(root);
5389         }
5390 }
5391
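/*
 * Walk the root's inode tree in inode-number order and evict every cached
 * inode we can still grab a reference to, pruning dentry aliases first so
 * the final iput() can free them.  Used on roots that are going away (or
 * when the filesystem is in an error state).
 */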
5392 void btrfs_invalidate_inodes(struct btrfs_root *root)
5393 {
5394         struct rb_node *node;
5395         struct rb_node *prev;
5396         struct btrfs_inode *entry;
5397         struct inode *inode;
5398         u64 objectid = 0;
5399
5400         if (!test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
5401                 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
5402
5403         spin_lock(&root->inode_lock);
5404 again:
5405         node = root->inode_tree.rb_node;
5406         prev = NULL;
5407         while (node) {
5408                 prev = node;
5409                 entry = rb_entry(node, struct btrfs_inode, rb_node);
5410
5411                 if (objectid < btrfs_ino(&entry->vfs_inode))
5412                         node = node->rb_left;
5413                 else if (objectid > btrfs_ino(&entry->vfs_inode))
5414                         node = node->rb_right;
5415                 else
5416                         break;
5417         }
5418         if (!node) {
5419                 while (prev) {
5420                         entry = rb_entry(prev, struct btrfs_inode, rb_node);
5421                         if (objectid <= btrfs_ino(&entry->vfs_inode)) {
5422                                 node = prev;
5423                                 break;
5424                         }
5425                         prev = rb_next(prev);
5426                 }
5427         }
5428         while (node) {
5429                 entry = rb_entry(node, struct btrfs_inode, rb_node);
5430                 objectid = btrfs_ino(&entry->vfs_inode) + 1;
5431                 inode = igrab(&entry->vfs_inode);
5432                 if (inode) {
5433                         spin_unlock(&root->inode_lock);
5434                         if (atomic_read(&inode->i_count) > 1)
5435                                 d_prune_aliases(inode);
5436                         /*
5437                          * btrfs_drop_inode will have it removed from
5438                          * the inode cache when its usage count
5439                          * hits zero.
5440                          */
5441                         iput(inode);
5442                         cond_resched();
5443                         spin_lock(&root->inode_lock);
5444                         goto again;
5445                 }
5446
5447                 if (cond_resched_lock(&root->inode_lock))
5448                         goto again;
5449
5450                 node = rb_next(node);
5451         }
5452         spin_unlock(&root->inode_lock);
5453 }
5454
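/*
 * Callbacks for iget5_locked()/insert_inode_locked4(): the init callback
 * seeds a freshly allocated inode with its key and root, while the find
 * actor matches a cached inode by objectid and owning root.
 */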
5455 static int btrfs_init_locked_inode(struct inode *inode, void *p)
5456 {
5457         struct btrfs_iget_args *args = p;
5458         inode->i_ino = args->location->objectid;
5459         memcpy(&BTRFS_I(inode)->location, args->location,
5460                sizeof(*args->location));
5461         BTRFS_I(inode)->root = args->root;
5462         return 0;
5463 }
5464
5465 static int btrfs_find_actor(struct inode *inode, void *opaque)
5466 {
5467         struct btrfs_iget_args *args = opaque;
5468         return args->location->objectid == BTRFS_I(inode)->location.objectid &&
5469                 args->root == BTRFS_I(inode)->root;
5470 }
5471
5472 static struct inode *btrfs_iget_locked(struct super_block *s,
5473                                        struct btrfs_key *location,
5474                                        struct btrfs_root *root)
5475 {
5476         struct inode *inode;
5477         struct btrfs_iget_args args;
5478         unsigned long hashval = btrfs_inode_hash(location->objectid, root);
5479
5480         args.location = location;
5481         args.root = root;
5482
5483         inode = iget5_locked(s, hashval, btrfs_find_actor,
5484                              btrfs_init_locked_inode,
5485                              (void *)&args);
5486         return inode;
5487 }
5488
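/*
 * Illustrative sketch of the usual calling pattern (not a new API): fill a
 * struct btrfs_key with objectid = <ino>, type = BTRFS_INODE_ITEM_KEY,
 * offset = 0 and pass it to btrfs_iget() together with the owning root.
 * btrfs_lookup_dentry() below does exactly this with the key returned by
 * btrfs_inode_by_name().
 */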
5489 /* Get an inode object given its location and corresponding root.
5490  * Returns in *new whether the inode had to be read from disk
5491  */
5492 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
5493                          struct btrfs_root *root, int *new)
5494 {
5495         struct inode *inode;
5496
5497         inode = btrfs_iget_locked(s, location, root);
5498         if (!inode)
5499                 return ERR_PTR(-ENOMEM);
5500
5501         if (inode->i_state & I_NEW) {
5502                 btrfs_read_locked_inode(inode);
5503                 if (!is_bad_inode(inode)) {
5504                         inode_tree_add(inode);
5505                         unlock_new_inode(inode);
5506                         if (new)
5507                                 *new = 1;
5508                 } else {
5509                         unlock_new_inode(inode);
5510                         iput(inode);
5511                         inode = ERR_PTR(-ESTALE);
5512                 }
5513         }
5514
5515         return inode;
5516 }
5517
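/*
 * Build an in-memory only "dummy" directory inode.  btrfs_lookup_dentry()
 * hands this back when a subvolume reference cannot be resolved, so the
 * dentry still points at a sane, empty placeholder directory.
 */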
5518 static struct inode *new_simple_dir(struct super_block *s,
5519                                     struct btrfs_key *key,
5520                                     struct btrfs_root *root)
5521 {
5522         struct inode *inode = new_inode(s);
5523
5524         if (!inode)
5525                 return ERR_PTR(-ENOMEM);
5526
5527         BTRFS_I(inode)->root = root;
5528         memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
5529         set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
5530
5531         inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
5532         inode->i_op = &btrfs_dir_ro_inode_operations;
5533         inode->i_fop = &simple_dir_operations;
5534         inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
5535         inode->i_mtime = CURRENT_TIME;
5536         inode->i_atime = inode->i_mtime;
5537         inode->i_ctime = inode->i_mtime;
5538         BTRFS_I(inode)->i_otime = inode->i_mtime;
5539
5540         return inode;
5541 }
5542
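/*
 * Core of directory lookup: resolve a name in 'dir' to an inode.  Regular
 * entries are read with btrfs_iget(); entries that point at a subvolume
 * root go through fixup_tree_root_location() so the lookup crosses into
 * the sub root, and orphan cleanup is kicked off for that root if needed.
 */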
5543 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
5544 {
5545         struct inode *inode;
5546         struct btrfs_root *root = BTRFS_I(dir)->root;
5547         struct btrfs_root *sub_root = root;
5548         struct btrfs_key location;
5549         int index;
5550         int ret = 0;
5551
5552         if (dentry->d_name.len > BTRFS_NAME_LEN)
5553                 return ERR_PTR(-ENAMETOOLONG);
5554
5555         ret = btrfs_inode_by_name(dir, dentry, &location);
5556         if (ret < 0)
5557                 return ERR_PTR(ret);
5558
5559         if (location.objectid == 0)
5560                 return ERR_PTR(-ENOENT);
5561
5562         if (location.type == BTRFS_INODE_ITEM_KEY) {
5563                 inode = btrfs_iget(dir->i_sb, &location, root, NULL);
5564                 return inode;
5565         }
5566
5567         BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
5568
5569         index = srcu_read_lock(&root->fs_info->subvol_srcu);
5570         ret = fixup_tree_root_location(root, dir, dentry,
5571                                        &location, &sub_root);
5572         if (ret < 0) {
5573                 if (ret != -ENOENT)
5574                         inode = ERR_PTR(ret);
5575                 else
5576                         inode = new_simple_dir(dir->i_sb, &location, sub_root);
5577         } else {
5578                 inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
5579         }
5580         srcu_read_unlock(&root->fs_info->subvol_srcu, index);
5581
5582         if (!IS_ERR(inode) && root != sub_root) {
5583                 down_read(&root->fs_info->cleanup_work_sem);
5584                 if (!(inode->i_sb->s_flags & MS_RDONLY))
5585                         ret = btrfs_orphan_cleanup(sub_root);
5586                 up_read(&root->fs_info->cleanup_work_sem);
5587                 if (ret) {
5588                         iput(inode);
5589                         inode = ERR_PTR(ret);
5590                 }
5591         }
5592
5593         return inode;
5594 }
5595
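/*
 * Tell the dcache to drop dentries that belong to a deleted subvolume, or
 * that point at the dummy empty-subvolume directory, so stale names do not
 * linger after a snapshot or subvolume is removed.
 */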
5596 static int btrfs_dentry_delete(const struct dentry *dentry)
5597 {
5598         struct btrfs_root *root;
5599         struct inode *inode = d_inode(dentry);
5600
5601         if (!inode && !IS_ROOT(dentry))
5602                 inode = d_inode(dentry->d_parent);
5603
5604         if (inode) {
5605                 root = BTRFS_I(inode)->root;
5606                 if (btrfs_root_refs(&root->root_item) == 0)
5607                         return 1;
5608
5609                 if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5610                         return 1;
5611         }
5612         return 0;
5613 }
5614
5615 static void btrfs_dentry_release(struct dentry *dentry)
5616 {
5617         kfree(dentry->d_fsdata);
5618 }
5619
5620 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
5621                                    unsigned int flags)
5622 {
5623         struct inode *inode;
5624
5625         inode = btrfs_lookup_dentry(dir, dentry);
5626         if (IS_ERR(inode)) {
5627                 if (PTR_ERR(inode) == -ENOENT)
5628                         inode = NULL;
5629                 else
5630                         return ERR_CAST(inode);
5631         }
5632
5633         return d_splice_alias(inode, dentry);
5634 }
5635
5636 unsigned char btrfs_filetype_table[] = {
5637         DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
5638 };
5639
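/*
 * readdir implementation: walk the DIR_INDEX items of this directory (or
 * DIR_ITEM for the tree root), merge in delayed dir-index insertions and
 * deletions that have not hit the btree yet, and emit each name via
 * dir_emit() with the type taken from btrfs_filetype_table[].
 */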
5640 static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
5641 {
5642         struct inode *inode = file_inode(file);
5643         struct btrfs_root *root = BTRFS_I(inode)->root;
5644         struct btrfs_item *item;
5645         struct btrfs_dir_item *di;
5646         struct btrfs_key key;
5647         struct btrfs_key found_key;
5648         struct btrfs_path *path;
5649         struct list_head ins_list;
5650         struct list_head del_list;
5651         int ret;
5652         struct extent_buffer *leaf;
5653         int slot;
5654         unsigned char d_type;
5655         int over = 0;
5656         u32 di_cur;
5657         u32 di_total;
5658         u32 di_len;
5659         int key_type = BTRFS_DIR_INDEX_KEY;
5660         char tmp_name[32];
5661         char *name_ptr;
5662         int name_len;
5663         int is_curr = 0;        /* ctx->pos points to the current index? */
5664
5665         /* FIXME, use a real flag for deciding about the key type */
5666         if (root->fs_info->tree_root == root)
5667                 key_type = BTRFS_DIR_ITEM_KEY;
5668
5669         if (!dir_emit_dots(file, ctx))
5670                 return 0;
5671
5672         path = btrfs_alloc_path();
5673         if (!path)
5674                 return -ENOMEM;
5675
5676         path->reada = 1;
5677
5678         if (key_type == BTRFS_DIR_INDEX_KEY) {
5679                 INIT_LIST_HEAD(&ins_list);
5680                 INIT_LIST_HEAD(&del_list);
5681                 btrfs_get_delayed_items(inode, &ins_list, &del_list);
5682         }
5683
5684         key.type = key_type;
5685         key.offset = ctx->pos;
5686         key.objectid = btrfs_ino(inode);
5687
5688         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5689         if (ret < 0)
5690                 goto err;
5691
5692         while (1) {
5693                 leaf = path->nodes[0];
5694                 slot = path->slots[0];
5695                 if (slot >= btrfs_header_nritems(leaf)) {
5696                         ret = btrfs_next_leaf(root, path);
5697                         if (ret < 0)
5698                                 goto err;
5699                         else if (ret > 0)
5700                                 break;
5701                         continue;
5702                 }
5703
5704                 item = btrfs_item_nr(slot);
5705                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
5706
5707                 if (found_key.objectid != key.objectid)
5708                         break;
5709                 if (found_key.type != key_type)
5710                         break;
5711                 if (found_key.offset < ctx->pos)
5712                         goto next;
5713                 if (key_type == BTRFS_DIR_INDEX_KEY &&
5714                     btrfs_should_delete_dir_index(&del_list,
5715                                                   found_key.offset))
5716                         goto next;
5717
5718                 ctx->pos = found_key.offset;
5719                 is_curr = 1;
5720
5721                 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
5722                 di_cur = 0;
5723                 di_total = btrfs_item_size(leaf, item);
5724
5725                 while (di_cur < di_total) {
5726                         struct btrfs_key location;
5727
5728                         if (verify_dir_item(root, leaf, di))
5729                                 break;
5730
5731                         name_len = btrfs_dir_name_len(leaf, di);
5732                         if (name_len <= sizeof(tmp_name)) {
5733                                 name_ptr = tmp_name;
5734                         } else {
5735                                 name_ptr = kmalloc(name_len, GFP_NOFS);
5736                                 if (!name_ptr) {
5737                                         ret = -ENOMEM;
5738                                         goto err;
5739                                 }
5740                         }
5741                         read_extent_buffer(leaf, name_ptr,
5742                                            (unsigned long)(di + 1), name_len);
5743
5744                         d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
5745                         btrfs_dir_item_key_to_cpu(leaf, di, &location);
5746
5747
5748                         /* is this a reference to our own snapshot? If so
5749                          * skip it.
5750                          *
5751                          * In contrast to old kernels, we insert the snapshot's
5752                          * dir item and dir index after it has been created, so
5753                          * we won't find a reference to our own snapshot. We
5754                          * still keep the following code for backward
5755                          * compatibility.
5756                          */
5757                         if (location.type == BTRFS_ROOT_ITEM_KEY &&
5758                             location.objectid == root->root_key.objectid) {
5759                                 over = 0;
5760                                 goto skip;
5761                         }
5762                         over = !dir_emit(ctx, name_ptr, name_len,
5763                                        location.objectid, d_type);
5764
5765 skip:
5766                         if (name_ptr != tmp_name)
5767                                 kfree(name_ptr);
5768
5769                         if (over)
5770                                 goto nopos;
5771                         di_len = btrfs_dir_name_len(leaf, di) +
5772                                  btrfs_dir_data_len(leaf, di) + sizeof(*di);
5773                         di_cur += di_len;
5774                         di = (struct btrfs_dir_item *)((char *)di + di_len);
5775                 }
5776 next:
5777                 path->slots[0]++;
5778         }
5779
5780         if (key_type == BTRFS_DIR_INDEX_KEY) {
5781                 if (is_curr)
5782                         ctx->pos++;
5783                 ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
5784                 if (ret)
5785                         goto nopos;
5786         }
5787
5788         /* Reached end of directory/root. Bump pos past the last item. */
5789         ctx->pos++;
5790
5791         /*
5792          * Stop new entries from being returned after we return the last
5793          * entry.
5794          *
5795          * New directory entries are assigned a strictly increasing
5796          * offset.  This means that new entries created during readdir
5797          * are *guaranteed* to be seen in the future by that readdir.
5798          * This has broken buggy programs which operate on names as
5799          * they're returned by readdir.  Until we re-use freed offsets
5800          * we have this hack to stop new entries from being returned
5801          * under the assumption that they'll never reach this huge
5802          * offset.
5803          *
5804          * This is being careful not to overflow 32bit loff_t unless the
5805          * last entry requires it because doing so has broken 32bit apps
5806          * in the past.
5807          */
5808         if (key_type == BTRFS_DIR_INDEX_KEY) {
5809                 if (ctx->pos >= INT_MAX)
5810                         ctx->pos = LLONG_MAX;
5811                 else
5812                         ctx->pos = INT_MAX;
5813         }
5814 nopos:
5815         ret = 0;
5816 err:
5817         if (key_type == BTRFS_DIR_INDEX_KEY)
5818                 btrfs_put_delayed_items(&ins_list, &del_list);
5819         btrfs_free_path(path);
5820         return ret;
5821 }
5822
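/*
 * ->write_inode: btrfs inode metadata reaches disk through transaction
 * commits, so for WB_SYNC_ALL writeback we join and commit the running
 * transaction (using the nolock variant for free-space inodes while the
 * filesystem is closing).  Other writeback modes are a no-op here.
 */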
5823 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
5824 {
5825         struct btrfs_root *root = BTRFS_I(inode)->root;
5826         struct btrfs_trans_handle *trans;
5827         int ret = 0;
5828         bool nolock = false;
5829
5830         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5831                 return 0;
5832
5833         if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode))
5834                 nolock = true;
5835
5836         if (wbc->sync_mode == WB_SYNC_ALL) {
5837                 if (nolock)
5838                         trans = btrfs_join_transaction_nolock(root);
5839                 else
5840                         trans = btrfs_join_transaction(root);
5841                 if (IS_ERR(trans))
5842                         return PTR_ERR(trans);
5843                 ret = btrfs_commit_transaction(trans, root);
5844         }
5845         return ret;
5846 }
5847
5848 /*
5849  * This is somewhat expensive, updating the tree every time the
5850  * inode changes.  But, it is most likely to find the inode in cache.
5851  * FIXME, needs more benchmarking...there are no reasons other than performance
5852  * to keep or drop this code.
5853  */
5854 static int btrfs_dirty_inode(struct inode *inode)
5855 {
5856         struct btrfs_root *root = BTRFS_I(inode)->root;
5857         struct btrfs_trans_handle *trans;
5858         int ret;
5859
5860         if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
5861                 return 0;
5862
5863         trans = btrfs_join_transaction(root);
5864         if (IS_ERR(trans))
5865                 return PTR_ERR(trans);
5866
5867         ret = btrfs_update_inode(trans, root, inode);
5868         if (ret && ret == -ENOSPC) {
5869                 /* whoops, lets try again with the full transaction */
5870                 btrfs_end_transaction(trans, root);
5871                 trans = btrfs_start_transaction(root, 1);
5872                 if (IS_ERR(trans))
5873                         return PTR_ERR(trans);
5874
5875                 ret = btrfs_update_inode(trans, root, inode);
5876         }
5877         btrfs_end_transaction(trans, root);
5878         if (BTRFS_I(inode)->delayed_node)
5879                 btrfs_balance_delayed_items(root);
5880
5881         return ret;
5882 }
5883
5884 /*
5885  * This is a copy of file_update_time.  We need this so we can return error on
5886  * ENOSPC for updating the inode in the case of file write and mmap writes.
5887  */
5888 static int btrfs_update_time(struct inode *inode, struct timespec *now,
5889                              int flags)
5890 {
5891         struct btrfs_root *root = BTRFS_I(inode)->root;
5892
5893         if (btrfs_root_readonly(root))
5894                 return -EROFS;
5895
5896         if (flags & S_VERSION)
5897                 inode_inc_iversion(inode);
5898         if (flags & S_CTIME)
5899                 inode->i_ctime = *now;
5900         if (flags & S_MTIME)
5901                 inode->i_mtime = *now;
5902         if (flags & S_ATIME)
5903                 inode->i_atime = *now;
5904         return btrfs_dirty_inode(inode);
5905 }
5906
5907 /*
5908  * find the highest existing sequence number in a directory
5909  * and then set the in-memory index_cnt variable to reflect
5910  * and then set the in-memory index_cnt variable to the next
5911  * free sequence number
5912 static int btrfs_set_inode_index_count(struct inode *inode)
5913 {
5914         struct btrfs_root *root = BTRFS_I(inode)->root;
5915         struct btrfs_key key, found_key;
5916         struct btrfs_path *path;
5917         struct extent_buffer *leaf;
5918         int ret;
5919
5920         key.objectid = btrfs_ino(inode);
5921         key.type = BTRFS_DIR_INDEX_KEY;
5922         key.offset = (u64)-1;
5923
5924         path = btrfs_alloc_path();
5925         if (!path)
5926                 return -ENOMEM;
5927
5928         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5929         if (ret < 0)
5930                 goto out;
5931         /* FIXME: we should be able to handle this */
5932         if (ret == 0)
5933                 goto out;
5934         ret = 0;
5935
5936         /*
5937          * MAGIC NUMBER EXPLANATION:
5938          * since we search a directory based on f_pos we have to start at 2
5939          * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
5940          * else has to start at 2
5941          */
5942         if (path->slots[0] == 0) {
5943                 BTRFS_I(inode)->index_cnt = 2;
5944                 goto out;
5945         }
5946
5947         path->slots[0]--;
5948
5949         leaf = path->nodes[0];
5950         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5951
5952         if (found_key.objectid != btrfs_ino(inode) ||
5953             found_key.type != BTRFS_DIR_INDEX_KEY) {
5954                 BTRFS_I(inode)->index_cnt = 2;
5955                 goto out;
5956         }
5957
5958         BTRFS_I(inode)->index_cnt = found_key.offset + 1;
5959 out:
5960         btrfs_free_path(path);
5961         return ret;
5962 }
5963
5964 /*
5965  * helper to find a free sequence number in a given directory.  This current
5966  * code is very simple; later versions will do smarter things in the btree
5967  */
5968 int btrfs_set_inode_index(struct inode *dir, u64 *index)
5969 {
5970         int ret = 0;
5971
5972         if (BTRFS_I(dir)->index_cnt == (u64)-1) {
5973                 ret = btrfs_inode_delayed_dir_index_count(dir);
5974                 if (ret) {
5975                         ret = btrfs_set_inode_index_count(dir);
5976                         if (ret)
5977                                 return ret;
5978                 }
5979         }
5980
5981         *index = BTRFS_I(dir)->index_cnt;
5982         BTRFS_I(dir)->index_cnt++;
5983
5984         return ret;
5985 }
5986
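/*
 * Hash a newly created inode into the inode cache in the locked (I_NEW)
 * state, using the same hash and comparison rules as btrfs_iget_locked().
 */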
5987 static int btrfs_insert_inode_locked(struct inode *inode)
5988 {
5989         struct btrfs_iget_args args;
5990         args.location = &BTRFS_I(inode)->location;
5991         args.root = BTRFS_I(inode)->root;
5992
5993         return insert_inode_locked4(inode,
5994                    btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
5995                    btrfs_find_actor, &args);
5996 }
5997
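/*
 * Allocate and initialize a brand new btrfs inode: reserve a directory
 * index, insert the inode item (plus an inode ref unless name is NULL,
 * i.e. the O_TMPFILE case, which also starts with nlink 0) into the fs
 * tree, and inherit flags and properties from the parent directory.  The
 * callers below (btrfs_create() and friends) show the typical use.
 */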
5998 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
5999                                      struct btrfs_root *root,
6000                                      struct inode *dir,
6001                                      const char *name, int name_len,
6002                                      u64 ref_objectid, u64 objectid,
6003                                      umode_t mode, u64 *index)
6004 {
6005         struct inode *inode;
6006         struct btrfs_inode_item *inode_item;
6007         struct btrfs_key *location;
6008         struct btrfs_path *path;
6009         struct btrfs_inode_ref *ref;
6010         struct btrfs_key key[2];
6011         u32 sizes[2];
6012         int nitems = name ? 2 : 1;
6013         unsigned long ptr;
6014         int ret;
6015
6016         path = btrfs_alloc_path();
6017         if (!path)
6018                 return ERR_PTR(-ENOMEM);
6019
6020         inode = new_inode(root->fs_info->sb);
6021         if (!inode) {
6022                 btrfs_free_path(path);
6023                 return ERR_PTR(-ENOMEM);
6024         }
6025
6026         /*
6027          * O_TMPFILE: set the link count to 0 so that the inode item we
6028          * fill in below carries the correct link count.
6029          */
6030         if (!name)
6031                 set_nlink(inode, 0);
6032
6033         /*
6034          * we have to initialize this early, so we can reclaim the inode
6035          * number if we fail afterwards in this function.
6036          */
6037         inode->i_ino = objectid;
6038
6039         if (dir && name) {
6040                 trace_btrfs_inode_request(dir);
6041
6042                 ret = btrfs_set_inode_index(dir, index);
6043                 if (ret) {
6044                         btrfs_free_path(path);
6045                         iput(inode);
6046                         return ERR_PTR(ret);
6047                 }
6048         } else if (dir) {
6049                 *index = 0;
6050         }
6051         /*
6052          * index_cnt is ignored for everything but a dir,
6053          * btrfs_set_inode_index_count has an explanation for the magic
6054          * number
6055          */
6056         BTRFS_I(inode)->index_cnt = 2;
6057         BTRFS_I(inode)->dir_index = *index;
6058         BTRFS_I(inode)->root = root;
6059         BTRFS_I(inode)->generation = trans->transid;
6060         inode->i_generation = BTRFS_I(inode)->generation;
6061
6062         /*
6063          * We could have gotten an inode number from somebody who was fsynced
6064          * and then removed in this same transaction, so let's just set full
6065          * sync since it will be a full sync anyway and this will blow away the
6066          * old info in the log.
6067          */
6068         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
6069
6070         key[0].objectid = objectid;
6071         key[0].type = BTRFS_INODE_ITEM_KEY;
6072         key[0].offset = 0;
6073
6074         sizes[0] = sizeof(struct btrfs_inode_item);
6075
6076         if (name) {
6077                 /*
6078                  * Start new inodes with an inode_ref. This is slightly more
6079                  * efficient for small numbers of hard links since they will
6080                  * be packed into one item. Extended refs will kick in if we
6081                  * add more hard links than can fit in the ref item.
6082                  */
6083                 key[1].objectid = objectid;
6084                 key[1].type = BTRFS_INODE_REF_KEY;
6085                 key[1].offset = ref_objectid;
6086
6087                 sizes[1] = name_len + sizeof(*ref);
6088         }
6089
6090         location = &BTRFS_I(inode)->location;
6091         location->objectid = objectid;
6092         location->offset = 0;
6093         location->type = BTRFS_INODE_ITEM_KEY;
6094
6095         ret = btrfs_insert_inode_locked(inode);
6096         if (ret < 0)
6097                 goto fail;
6098
6099         path->leave_spinning = 1;
6100         ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
6101         if (ret != 0)
6102                 goto fail_unlock;
6103
6104         inode_init_owner(inode, dir, mode);
6105         inode_set_bytes(inode, 0);
6106
6107         inode->i_mtime = CURRENT_TIME;
6108         inode->i_atime = inode->i_mtime;
6109         inode->i_ctime = inode->i_mtime;
6110         BTRFS_I(inode)->i_otime = inode->i_mtime;
6111
6112         inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
6113                                   struct btrfs_inode_item);
6114         memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
6115                              sizeof(*inode_item));
6116         fill_inode_item(trans, path->nodes[0], inode_item, inode);
6117
6118         if (name) {
6119                 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
6120                                      struct btrfs_inode_ref);
6121                 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
6122                 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
6123                 ptr = (unsigned long)(ref + 1);
6124                 write_extent_buffer(path->nodes[0], name, ptr, name_len);
6125         }
6126
6127         btrfs_mark_buffer_dirty(path->nodes[0]);
6128         btrfs_free_path(path);
6129
6130         btrfs_inherit_iflags(inode, dir);
6131
6132         if (S_ISREG(mode)) {
6133                 if (btrfs_test_opt(root, NODATASUM))
6134                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
6135                 if (btrfs_test_opt(root, NODATACOW))
6136                         BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
6137                                 BTRFS_INODE_NODATASUM;
6138         }
6139
6140         inode_tree_add(inode);
6141
6142         trace_btrfs_inode_new(inode);
6143         btrfs_set_inode_last_trans(trans, inode);
6144
6145         btrfs_update_root_times(trans, root);
6146
6147         ret = btrfs_inode_inherit_props(trans, inode, dir);
6148         if (ret)
6149                 btrfs_err(root->fs_info,
6150                           "error inheriting props for ino %llu (root %llu): %d",
6151                           btrfs_ino(inode), root->root_key.objectid, ret);
6152
6153         return inode;
6154
6155 fail_unlock:
6156         unlock_new_inode(inode);
6157 fail:
6158         if (dir && name)
6159                 BTRFS_I(dir)->index_cnt--;
6160         btrfs_free_path(path);
6161         iput(inode);
6162         return ERR_PTR(ret);
6163 }
6164
6165 static inline u8 btrfs_inode_type(struct inode *inode)
6166 {
6167         return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
6168 }
6169
6170 /*
6171  * utility function to add 'inode' into 'parent_inode' with
6172  * a given name and a given sequence number.
6173  * if 'add_backref' is true, also insert a backref from the
6174  * inode to the parent directory.
6175  */
6176 int btrfs_add_link(struct btrfs_trans_handle *trans,
6177                    struct inode *parent_inode, struct inode *inode,
6178                    const char *name, int name_len, int add_backref, u64 index)
6179 {
6180         int ret = 0;
6181         struct btrfs_key key;
6182         struct btrfs_root *root = BTRFS_I(parent_inode)->root;
6183         u64 ino = btrfs_ino(inode);
6184         u64 parent_ino = btrfs_ino(parent_inode);
6185
6186         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6187                 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
6188         } else {
6189                 key.objectid = ino;
6190                 key.type = BTRFS_INODE_ITEM_KEY;
6191                 key.offset = 0;
6192         }
6193
6194         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6195                 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
6196                                          key.objectid, root->root_key.objectid,
6197                                          parent_ino, index, name, name_len);
6198         } else if (add_backref) {
6199                 ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
6200                                              parent_ino, index);
6201         }
6202
6203         /* Nothing to clean up yet */
6204         if (ret)
6205                 return ret;
6206
6207         ret = btrfs_insert_dir_item(trans, root, name, name_len,
6208                                     parent_inode, &key,
6209                                     btrfs_inode_type(inode), index);
6210         if (ret == -EEXIST || ret == -EOVERFLOW)
6211                 goto fail_dir_item;
6212         else if (ret) {
6213                 btrfs_abort_transaction(trans, root, ret);
6214                 return ret;
6215         }
6216
6217         btrfs_i_size_write(parent_inode, parent_inode->i_size +
6218                            name_len * 2);
6219         inode_inc_iversion(parent_inode);
6220         parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
6221         ret = btrfs_update_inode(trans, root, parent_inode);
6222         if (ret)
6223                 btrfs_abort_transaction(trans, root, ret);
6224         return ret;
6225
6226 fail_dir_item:
6227         if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
6228                 u64 local_index;
6229                 int err;
6230                 err = btrfs_del_root_ref(trans, root->fs_info->tree_root,
6231                                  key.objectid, root->root_key.objectid,
6232                                  parent_ino, &local_index, name, name_len);
6233
6234         } else if (add_backref) {
6235                 u64 local_index;
6236                 int err;
6237
6238                 err = btrfs_del_inode_ref(trans, root, name, name_len,
6239                                           ino, parent_ino, &local_index);
6240         }
6241         return ret;
6242 }
6243
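/*
 * Thin wrapper around btrfs_add_link() for non-directories; any positive
 * return value is mapped to -EEXIST.
 */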
6244 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
6245                             struct inode *dir, struct dentry *dentry,
6246                             struct inode *inode, int backref, u64 index)
6247 {
6248         int err = btrfs_add_link(trans, dir, inode,
6249                                  dentry->d_name.name, dentry->d_name.len,
6250                                  backref, index);
6251         if (err > 0)
6252                 err = -EEXIST;
6253         return err;
6254 }
6255
6256 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
6257                         umode_t mode, dev_t rdev)
6258 {
6259         struct btrfs_trans_handle *trans;
6260         struct btrfs_root *root = BTRFS_I(dir)->root;
6261         struct inode *inode = NULL;
6262         int err;
6263         int drop_inode = 0;
6264         u64 objectid;
6265         u64 index = 0;
6266
6267         if (!new_valid_dev(rdev))
6268                 return -EINVAL;
6269
6270         /*
6271          * 2 for inode item and ref
6272          * 2 for dir items
6273          * 1 for xattr if selinux is on
6274          */
6275         trans = btrfs_start_transaction(root, 5);
6276         if (IS_ERR(trans))
6277                 return PTR_ERR(trans);
6278
6279         err = btrfs_find_free_ino(root, &objectid);
6280         if (err)
6281                 goto out_unlock;
6282
6283         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6284                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6285                                 mode, &index);
6286         if (IS_ERR(inode)) {
6287                 err = PTR_ERR(inode);
6288                 goto out_unlock;
6289         }
6290
6291         /*
6292          * If the active LSM wants to access the inode during
6293          * d_instantiate it needs these. Smack checks to see
6294          * if the filesystem supports xattrs by looking at the
6295          * ops vector.
6296          */
6297         inode->i_op = &btrfs_special_inode_operations;
6298         init_special_inode(inode, inode->i_mode, rdev);
6299
6300         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6301         if (err)
6302                 goto out_unlock_inode;
6303
6304         err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
6305         if (err) {
6306                 goto out_unlock_inode;
6307         } else {
6308                 btrfs_update_inode(trans, root, inode);
6309                 unlock_new_inode(inode);
6310                 d_instantiate(dentry, inode);
6311         }
6312
6313 out_unlock:
6314         btrfs_end_transaction(trans, root);
6315         btrfs_balance_delayed_items(root);
6316         btrfs_btree_balance_dirty(root);
6317         if (drop_inode) {
6318                 inode_dec_link_count(inode);
6319                 iput(inode);
6320         }
6321         return err;
6322
6323 out_unlock_inode:
6324         drop_inode = 1;
6325         unlock_new_inode(inode);
6326         goto out_unlock;
6327
6328 }
6329
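/*
 * ->create: allocate a regular file inode, wire up its file and address
 * space operations, and link it into the parent directory.
 */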
6330 static int btrfs_create(struct inode *dir, struct dentry *dentry,
6331                         umode_t mode, bool excl)
6332 {
6333         struct btrfs_trans_handle *trans;
6334         struct btrfs_root *root = BTRFS_I(dir)->root;
6335         struct inode *inode = NULL;
6336         int drop_inode_on_err = 0;
6337         int err;
6338         u64 objectid;
6339         u64 index = 0;
6340
6341         /*
6342          * 2 for inode item and ref
6343          * 2 for dir items
6344          * 1 for xattr if selinux is on
6345          */
6346         trans = btrfs_start_transaction(root, 5);
6347         if (IS_ERR(trans))
6348                 return PTR_ERR(trans);
6349
6350         err = btrfs_find_free_ino(root, &objectid);
6351         if (err)
6352                 goto out_unlock;
6353
6354         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6355                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6356                                 mode, &index);
6357         if (IS_ERR(inode)) {
6358                 err = PTR_ERR(inode);
6359                 goto out_unlock;
6360         }
6361         drop_inode_on_err = 1;
6362         /*
6363          * If the active LSM wants to access the inode during
6364          * d_instantiate it needs these. Smack checks to see
6365          * if the filesystem supports xattrs by looking at the
6366          * ops vector.
6367          */
6368         inode->i_fop = &btrfs_file_operations;
6369         inode->i_op = &btrfs_file_inode_operations;
6370         inode->i_mapping->a_ops = &btrfs_aops;
6371
6372         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6373         if (err)
6374                 goto out_unlock_inode;
6375
6376         err = btrfs_update_inode(trans, root, inode);
6377         if (err)
6378                 goto out_unlock_inode;
6379
6380         err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
6381         if (err)
6382                 goto out_unlock_inode;
6383
6384         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
6385         unlock_new_inode(inode);
6386         d_instantiate(dentry, inode);
6387
6388 out_unlock:
6389         btrfs_end_transaction(trans, root);
6390         if (err && drop_inode_on_err) {
6391                 inode_dec_link_count(inode);
6392                 iput(inode);
6393         }
6394         btrfs_balance_delayed_items(root);
6395         btrfs_btree_balance_dirty(root);
6396         return err;
6397
6398 out_unlock_inode:
6399         unlock_new_inode(inode);
6400         goto out_unlock;
6401
6402 }
6403
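/*
 * ->link: add another name for an existing inode.  Cross-subvolume links
 * are rejected with -EXDEV, and linking an O_TMPFILE inode (nlink going
 * from 0 to 1) also removes its orphan item.
 */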
6404 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
6405                       struct dentry *dentry)
6406 {
6407         struct btrfs_trans_handle *trans;
6408         struct btrfs_root *root = BTRFS_I(dir)->root;
6409         struct inode *inode = d_inode(old_dentry);
6410         u64 index;
6411         int err;
6412         int drop_inode = 0;
6413
6414         /* do not allow sys_link's with other subvols of the same device */
6415         if (root->objectid != BTRFS_I(inode)->root->objectid)
6416                 return -EXDEV;
6417
6418         if (inode->i_nlink >= BTRFS_LINK_MAX)
6419                 return -EMLINK;
6420
6421         err = btrfs_set_inode_index(dir, &index);
6422         if (err)
6423                 goto fail;
6424
6425         /*
6426          * 2 items for inode and inode ref
6427          * 2 items for dir items
6428          * 1 item for parent inode
6429          */
6430         trans = btrfs_start_transaction(root, 5);
6431         if (IS_ERR(trans)) {
6432                 err = PTR_ERR(trans);
6433                 goto fail;
6434         }
6435
6436         /* There are several dir indexes for this inode, clear the cache. */
6437         BTRFS_I(inode)->dir_index = 0ULL;
6438         inc_nlink(inode);
6439         inode_inc_iversion(inode);
6440         inode->i_ctime = CURRENT_TIME;
6441         ihold(inode);
6442         set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);
6443
6444         err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
6445
6446         if (err) {
6447                 drop_inode = 1;
6448         } else {
6449                 struct dentry *parent = dentry->d_parent;
6450                 err = btrfs_update_inode(trans, root, inode);
6451                 if (err)
6452                         goto fail;
6453                 if (inode->i_nlink == 1) {
6454                         /*
6455                          * If new hard link count is 1, it's a file created
6456                          * with open(2) O_TMPFILE flag.
6457                          */
6458                         err = btrfs_orphan_del(trans, inode);
6459                         if (err)
6460                                 goto fail;
6461                 }
6462                 d_instantiate(dentry, inode);
6463                 btrfs_log_new_name(trans, inode, NULL, parent);
6464         }
6465
6466         btrfs_end_transaction(trans, root);
6467         btrfs_balance_delayed_items(root);
6468 fail:
6469         if (drop_inode) {
6470                 inode_dec_link_count(inode);
6471                 iput(inode);
6472         }
6473         btrfs_btree_balance_dirty(root);
6474         return err;
6475 }
6476
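/*
 * ->mkdir: like btrfs_create() but for directories; note that
 * d_instantiate() is deliberately called before unlock_new_inode(), see
 * the comment at the call site.
 */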
6477 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
6478 {
6479         struct inode *inode = NULL;
6480         struct btrfs_trans_handle *trans;
6481         struct btrfs_root *root = BTRFS_I(dir)->root;
6482         int err = 0;
6483         int drop_on_err = 0;
6484         u64 objectid = 0;
6485         u64 index = 0;
6486
6487         /*
6488          * 2 items for inode and ref
6489          * 2 items for dir items
6490          * 1 for xattr if selinux is on
6491          */
6492         trans = btrfs_start_transaction(root, 5);
6493         if (IS_ERR(trans))
6494                 return PTR_ERR(trans);
6495
6496         err = btrfs_find_free_ino(root, &objectid);
6497         if (err)
6498                 goto out_fail;
6499
6500         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
6501                                 dentry->d_name.len, btrfs_ino(dir), objectid,
6502                                 S_IFDIR | mode, &index);
6503         if (IS_ERR(inode)) {
6504                 err = PTR_ERR(inode);
6505                 goto out_fail;
6506         }
6507
6508         drop_on_err = 1;
6509         /* these must be set before we unlock the inode */
6510         inode->i_op = &btrfs_dir_inode_operations;
6511         inode->i_fop = &btrfs_dir_file_operations;
6512
6513         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
6514         if (err)
6515                 goto out_fail_inode;
6516
6517         btrfs_i_size_write(inode, 0);
6518         err = btrfs_update_inode(trans, root, inode);
6519         if (err)
6520                 goto out_fail_inode;
6521
6522         err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
6523                              dentry->d_name.len, 0, index);
6524         if (err)
6525                 goto out_fail_inode;
6526
6527         d_instantiate(dentry, inode);
6528         /*
6529          * mkdir is special.  We're unlocking after we call d_instantiate
6530          * to avoid a race with nfsd calling d_instantiate.
6531          */
6532         unlock_new_inode(inode);
6533         drop_on_err = 0;
6534
6535 out_fail:
6536         btrfs_end_transaction(trans, root);
6537         if (drop_on_err) {
6538                 inode_dec_link_count(inode);
6539                 iput(inode);
6540         }
6541         btrfs_balance_delayed_items(root);
6542         btrfs_btree_balance_dirty(root);
6543         return err;
6544
6545 out_fail_inode:
6546         unlock_new_inode(inode);
6547         goto out_fail;
6548 }
6549
6550 /* Find next extent map of a given extent map; caller must hold the extent map tree lock */
6551 static struct extent_map *next_extent_map(struct extent_map *em)
6552 {
6553         struct rb_node *next;
6554
6555         next = rb_next(&em->rb_node);
6556         if (!next)
6557                 return NULL;
6558         return container_of(next, struct extent_map, rb_node);
6559 }
6560
6561 static struct extent_map *prev_extent_map(struct extent_map *em)
6562 {
6563         struct rb_node *prev;
6564
6565         prev = rb_prev(&em->rb_node);
6566         if (!prev)
6567                 return NULL;
6568         return container_of(prev, struct extent_map, rb_node);
6569 }
6570
6571 /* helper for btrfs_get_extent.  Given an existing extent in the tree,
6572  * the existing extent is the nearest extent to map_start,
6573  * and an extent that you want to insert, deal with overlap and insert
6574  * the best fitted new extent into the tree.
6575  */
6576 static int merge_extent_mapping(struct extent_map_tree *em_tree,
6577                                 struct extent_map *existing,
6578                                 struct extent_map *em,
6579                                 u64 map_start)
6580 {
6581         struct extent_map *prev;
6582         struct extent_map *next;
6583         u64 start;
6584         u64 end;
6585         u64 start_diff;
6586
6587         BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
6588
6589         if (existing->start > map_start) {
6590                 next = existing;
6591                 prev = prev_extent_map(next);
6592         } else {
6593                 prev = existing;
6594                 next = next_extent_map(prev);
6595         }
6596
6597         start = prev ? extent_map_end(prev) : em->start;
6598         start = max_t(u64, start, em->start);
6599         end = next ? next->start : extent_map_end(em);
6600         end = min_t(u64, end, extent_map_end(em));
6601         start_diff = start - em->start;
6602         em->start = start;
6603         em->len = end - start;
6604         if (em->block_start < EXTENT_MAP_LAST_BYTE &&
6605             !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
6606                 em->block_start += start_diff;
6607                 em->block_len -= start_diff;
6608         }
6609         return add_extent_mapping(em_tree, em, 0);
6610 }
6611
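/*
 * Copy a compressed inline extent out of the leaf into a temporary buffer
 * and decompress it straight into the given page, capped at one page of
 * uncompressed data.
 */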
6612 static noinline int uncompress_inline(struct btrfs_path *path,
6613                                       struct inode *inode, struct page *page,
6614                                       size_t pg_offset, u64 extent_offset,
6615                                       struct btrfs_file_extent_item *item)
6616 {
6617         int ret;
6618         struct extent_buffer *leaf = path->nodes[0];
6619         char *tmp;
6620         size_t max_size;
6621         unsigned long inline_size;
6622         unsigned long ptr;
6623         int compress_type;
6624
6625         WARN_ON(pg_offset != 0);
6626         compress_type = btrfs_file_extent_compression(leaf, item);
6627         max_size = btrfs_file_extent_ram_bytes(leaf, item);
6628         inline_size = btrfs_file_extent_inline_item_len(leaf,
6629                                         btrfs_item_nr(path->slots[0]));
6630         tmp = kmalloc(inline_size, GFP_NOFS);
6631         if (!tmp)
6632                 return -ENOMEM;
6633         ptr = btrfs_file_extent_inline_start(item);
6634
6635         read_extent_buffer(leaf, tmp, ptr, inline_size);
6636
6637         max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
6638         ret = btrfs_decompress(compress_type, tmp, page,
6639                                extent_offset, inline_size, max_size);
6640         kfree(tmp);
6641         return ret;
6642 }
6643
6644 /*
6645  * a bit scary, this does extent mapping from logical file offset to the disk.
6646  * the ugly parts come from merging extents from the disk with the in-ram
6647  * representation.  This gets more complex because of the data=ordered code,
6648  * where the in-ram extents might be locked pending data=ordered completion.
6649  *
6650  * This also copies inline extents directly into the page.
6651  */
6652
6653 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
6654                                     size_t pg_offset, u64 start, u64 len,
6655                                     int create)
6656 {
6657         int ret;
6658         int err = 0;
6659         u64 extent_start = 0;
6660         u64 extent_end = 0;
6661         u64 objectid = btrfs_ino(inode);
6662         u32 found_type;
6663         struct btrfs_path *path = NULL;
6664         struct btrfs_root *root = BTRFS_I(inode)->root;
6665         struct btrfs_file_extent_item *item;
6666         struct extent_buffer *leaf;
6667         struct btrfs_key found_key;
6668         struct extent_map *em = NULL;
6669         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
6670         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6671         struct btrfs_trans_handle *trans = NULL;
6672         const bool new_inline = !page || create;
6673
6674 again:
6675         read_lock(&em_tree->lock);
6676         em = lookup_extent_mapping(em_tree, start, len);
6677         if (em)
6678                 em->bdev = root->fs_info->fs_devices->latest_bdev;
6679         read_unlock(&em_tree->lock);
6680
6681         if (em) {
6682                 if (em->start > start || em->start + em->len <= start)
6683                         free_extent_map(em);
6684                 else if (em->block_start == EXTENT_MAP_INLINE && page)
6685                         free_extent_map(em);
6686                 else
6687                         goto out;
6688         }
6689         em = alloc_extent_map();
6690         if (!em) {
6691                 err = -ENOMEM;
6692                 goto out;
6693         }
6694         em->bdev = root->fs_info->fs_devices->latest_bdev;
6695         em->start = EXTENT_MAP_HOLE;
6696         em->orig_start = EXTENT_MAP_HOLE;
6697         em->len = (u64)-1;
6698         em->block_len = (u64)-1;
6699
6700         if (!path) {
6701                 path = btrfs_alloc_path();
6702                 if (!path) {
6703                         err = -ENOMEM;
6704                         goto out;
6705                 }
6706                 /*
6707                  * Chances are we'll be called again, so go ahead and do
6708                  * readahead
6709                  */
6710                 path->reada = 1;
6711         }
6712
6713         ret = btrfs_lookup_file_extent(trans, root, path,
6714                                        objectid, start, trans != NULL);
6715         if (ret < 0) {
6716                 err = ret;
6717                 goto out;
6718         }
6719
6720         if (ret != 0) {
6721                 if (path->slots[0] == 0)
6722                         goto not_found;
6723                 path->slots[0]--;
6724         }
6725
6726         leaf = path->nodes[0];
6727         item = btrfs_item_ptr(leaf, path->slots[0],
6728                               struct btrfs_file_extent_item);
6729         /* are we inside the extent that was found? */
6730         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6731         found_type = found_key.type;
6732         if (found_key.objectid != objectid ||
6733             found_type != BTRFS_EXTENT_DATA_KEY) {
6734                 /*
6735                  * If we back up past the first extent we want to move forward
6736                  * and see if there is an extent in front of us, otherwise we'll
6737                  * say there is a hole for our whole search range which can
6738                  * cause problems.
6739                  */
6740                 extent_end = start;
6741                 goto next;
6742         }
6743
6744         found_type = btrfs_file_extent_type(leaf, item);
6745         extent_start = found_key.offset;
6746         if (found_type == BTRFS_FILE_EXTENT_REG ||
6747             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6748                 extent_end = extent_start +
6749                        btrfs_file_extent_num_bytes(leaf, item);
6750         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6751                 size_t size;
6752                 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6753                 extent_end = ALIGN(extent_start + size, root->sectorsize);
6754         }
6755 next:
6756         if (start >= extent_end) {
6757                 path->slots[0]++;
6758                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
6759                         ret = btrfs_next_leaf(root, path);
6760                         if (ret < 0) {
6761                                 err = ret;
6762                                 goto out;
6763                         }
6764                         if (ret > 0)
6765                                 goto not_found;
6766                         leaf = path->nodes[0];
6767                 }
6768                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6769                 if (found_key.objectid != objectid ||
6770                     found_key.type != BTRFS_EXTENT_DATA_KEY)
6771                         goto not_found;
6772                 if (start + len <= found_key.offset)
6773                         goto not_found;
6774                 if (start > found_key.offset)
6775                         goto next;
6776                 em->start = start;
6777                 em->orig_start = start;
6778                 em->len = found_key.offset - start;
6779                 goto not_found_em;
6780         }
6781
6782         btrfs_extent_item_to_extent_map(inode, path, item, new_inline, em);
6783
6784         if (found_type == BTRFS_FILE_EXTENT_REG ||
6785             found_type == BTRFS_FILE_EXTENT_PREALLOC) {
6786                 goto insert;
6787         } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
6788                 unsigned long ptr;
6789                 char *map;
6790                 size_t size;
6791                 size_t extent_offset;
6792                 size_t copy_size;
6793
6794                 if (new_inline)
6795                         goto out;
6796
6797                 size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
6798                 extent_offset = page_offset(page) + pg_offset - extent_start;
6799                 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
6800                                 size - extent_offset);
6801                 em->start = extent_start + extent_offset;
6802                 em->len = ALIGN(copy_size, root->sectorsize);
6803                 em->orig_block_len = em->len;
6804                 em->orig_start = em->start;
6805                 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
6806                 if (create == 0 && !PageUptodate(page)) {
6807                         if (btrfs_file_extent_compression(leaf, item) !=
6808                             BTRFS_COMPRESS_NONE) {
6809                                 ret = uncompress_inline(path, inode, page,
6810                                                         pg_offset,
6811                                                         extent_offset, item);
6812                                 if (ret) {
6813                                         err = ret;
6814                                         goto out;
6815                                 }
6816                         } else {
6817                                 map = kmap(page);
6818                                 read_extent_buffer(leaf, map + pg_offset, ptr,
6819                                                    copy_size);
6820                                 if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
6821                                         memset(map + pg_offset + copy_size, 0,
6822                                                PAGE_CACHE_SIZE - pg_offset -
6823                                                copy_size);
6824                                 }
6825                                 kunmap(page);
6826                         }
6827                         flush_dcache_page(page);
6828                 } else if (create && PageUptodate(page)) {
6829                         BUG();
6830                         if (!trans) {
6831                                 kunmap(page);
6832                                 free_extent_map(em);
6833                                 em = NULL;
6834
6835                                 btrfs_release_path(path);
6836                                 trans = btrfs_join_transaction(root);
6837
6838                                 if (IS_ERR(trans))
6839                                         return ERR_CAST(trans);
6840                                 goto again;
6841                         }
6842                         map = kmap(page);
6843                         write_extent_buffer(leaf, map + pg_offset, ptr,
6844                                             copy_size);
6845                         kunmap(page);
6846                         btrfs_mark_buffer_dirty(leaf);
6847                 }
6848                 set_extent_uptodate(io_tree, em->start,
6849                                     extent_map_end(em) - 1, NULL, GFP_NOFS);
6850                 goto insert;
6851         }
6852 not_found:
6853         em->start = start;
6854         em->orig_start = start;
6855         em->len = len;
6856 not_found_em:
6857         em->block_start = EXTENT_MAP_HOLE;
6858         set_bit(EXTENT_FLAG_VACANCY, &em->flags);
6859 insert:
6860         btrfs_release_path(path);
6861         if (em->start > start || extent_map_end(em) <= start) {
6862                 btrfs_err(root->fs_info, "bad extent! em: [%llu %llu] passed [%llu %llu]",
6863                         em->start, em->len, start, len);
6864                 err = -EIO;
6865                 goto out;
6866         }
6867
6868         err = 0;
6869         write_lock(&em_tree->lock);
6870         ret = add_extent_mapping(em_tree, em, 0);
6871         /* it is possible that someone inserted the extent into the tree
6872          * while we had the lock dropped.  It is also possible that
6873          * an overlapping map exists in the tree
6874          */
6875         if (ret == -EEXIST) {
6876                 struct extent_map *existing;
6877
6878                 ret = 0;
6879
6880                 existing = search_extent_mapping(em_tree, start, len);
6881                 /*
6882                  * existing will always be non-NULL, since there must be
6883                  * an extent causing the -EEXIST.
6884                  */
6885                 if (start >= extent_map_end(existing) ||
6886                     start <= existing->start) {
6887                         /*
6888                          * The existing extent map is the one nearest to
6889                          * the [start, start + len) range which overlaps
6890                          */
6891                         err = merge_extent_mapping(em_tree, existing,
6892                                                    em, start);
6893                         free_extent_map(existing);
6894                         if (err) {
6895                                 free_extent_map(em);
6896                                 em = NULL;
6897                         }
6898                 } else {
6899                         free_extent_map(em);
6900                         em = existing;
6901                         err = 0;
6902                 }
6903         }
6904         write_unlock(&em_tree->lock);
6905 out:
6906
6907         trace_btrfs_get_extent(root, em);
6908
6909         btrfs_free_path(path);
6910         if (trans) {
6911                 ret = btrfs_end_transaction(trans, root);
6912                 if (!err)
6913                         err = ret;
6914         }
6915         if (err) {
6916                 free_extent_map(em);
6917                 return ERR_PTR(err);
6918         }
6919         BUG_ON(!em); /* Error is always set */
6920         return em;
6921 }
6922
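/*
 * Fiemap variant of the above: if btrfs_get_extent() only finds a hole or a
 * prealloc extent, also look in the io_tree for delalloc ranges that have
 * not been written out yet and report those, trimming the hole so it ends
 * where the delalloc begins.
 */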
6923 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
6924                                            size_t pg_offset, u64 start, u64 len,
6925                                            int create)
6926 {
6927         struct extent_map *em;
6928         struct extent_map *hole_em = NULL;
6929         u64 range_start = start;
6930         u64 end;
6931         u64 found;
6932         u64 found_end;
6933         int err = 0;
6934
6935         em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
6936         if (IS_ERR(em))
6937                 return em;
6938         if (em) {
6939                 /*
6940                  * if our em maps to
6941                  * -  a hole or
6942                  * -  a pre-alloc extent,
6943                  * there might actually be delalloc bytes behind it.
6944                  */
6945                 if (em->block_start != EXTENT_MAP_HOLE &&
6946                     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6947                         return em;
6948                 else
6949                         hole_em = em;
6950         }
6951
6952         /* check to see if we've wrapped (len == -1 or similar) */
6953         end = start + len;
6954         if (end < start)
6955                 end = (u64)-1;
6956         else
6957                 end -= 1;
6958
6959         em = NULL;
6960
6961         /* ok, we didn't find anything, let's look for delalloc */
6962         found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
6963                                  end, len, EXTENT_DELALLOC, 1);
6964         found_end = range_start + found;
6965         if (found_end < range_start)
6966                 found_end = (u64)-1;
6967
6968         /*
6969          * we didn't find anything useful, return
6970          * the original results from get_extent()
6971          */
6972         if (range_start > end || found_end <= start) {
6973                 em = hole_em;
6974                 hole_em = NULL;
6975                 goto out;
6976         }
6977
6978         /* adjust the range_start to make sure it doesn't
6979          * go backwards from the start they passed in
6980          */
6981         range_start = max(start, range_start);
6982         found = found_end - range_start;
6983
6984         if (found > 0) {
6985                 u64 hole_start = start;
6986                 u64 hole_len = len;
6987
6988                 em = alloc_extent_map();
6989                 if (!em) {
6990                         err = -ENOMEM;
6991                         goto out;
6992                 }
6993                 /*
6994                  * when btrfs_get_extent can't find anything it
6995                  * returns one huge hole
6996                  *
6997                  * make sure what it found really fits our range, and
6998                  * adjust to make sure it is based on the start from
6999                  * the caller
7000                  */
7001                 if (hole_em) {
7002                         u64 calc_end = extent_map_end(hole_em);
7003
7004                         if (calc_end <= start || (hole_em->start > end)) {
7005                                 free_extent_map(hole_em);
7006                                 hole_em = NULL;
7007                         } else {
7008                                 hole_start = max(hole_em->start, start);
7009                                 hole_len = calc_end - hole_start;
7010                         }
7011                 }
7012                 em->bdev = NULL;
7013                 if (hole_em && range_start > hole_start) {
7014                         /* our hole starts before our delalloc, so we
7015                          * have to return just the part of the hole
7016                          * that goes until the delalloc starts
7017                          */
7018                         em->len = min(hole_len,
7019                                       range_start - hole_start);
7020                         em->start = hole_start;
7021                         em->orig_start = hole_start;
7022                         /*
7023                          * don't adjust block start at all,
7024                          * it is fixed at EXTENT_MAP_HOLE
7025                          */
7026                         em->block_start = hole_em->block_start;
7027                         em->block_len = hole_len;
7028                         if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
7029                                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
7030                 } else {
7031                         em->start = range_start;
7032                         em->len = found;
7033                         em->orig_start = range_start;
7034                         em->block_start = EXTENT_MAP_DELALLOC;
7035                         em->block_len = found;
7036                 }
7037         } else if (hole_em) {
7038                 return hole_em;
7039         }
7040 out:
7041
7042         free_extent_map(hole_em);
7043         if (err) {
7044                 free_extent_map(em);
7045                 return ERR_PTR(err);
7046         }
7047         return em;
7048 }
7049
7050 static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
7051                                                   u64 start, u64 len)
7052 {
7053         struct btrfs_root *root = BTRFS_I(inode)->root;
7054         struct extent_map *em;
7055         struct btrfs_key ins;
7056         u64 alloc_hint;
7057         int ret;
7058
7059         alloc_hint = get_extent_allocation_hint(inode, start, len);
7060         ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
7061                                    alloc_hint, &ins, 1, 1);
7062         if (ret)
7063                 return ERR_PTR(ret);
7064
7065         em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
7066                               ins.offset, ins.offset, ins.offset, 0);
7067         if (IS_ERR(em)) {
7068                 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
7069                 return em;
7070         }
7071
7072         ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
7073                                            ins.offset, ins.offset, 0);
7074         if (ret) {
7075                 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
7076                 free_extent_map(em);
7077                 return ERR_PTR(ret);
7078         }
7079
7080         return em;
7081 }
7082
7083 /*
7084  * returns 1 when the nocow is safe, < 0 on error, 0 if the
7085  * block must be cow'd
7086  */
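/*
 * The checks below require that the file extent item covers @offset, is a
 * regular extent on a NODATACOW inode or a prealloc extent, is not
 * compressed/encrypted/encoded, points at allocated space (bytenr != 0),
 * does not sit in a read-only block group, is not referenced by any other
 * file (no cross references), and has no checksums in the range we are
 * about to overwrite.
 */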
7087 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
7088                               u64 *orig_start, u64 *orig_block_len,
7089                               u64 *ram_bytes)
7090 {
7091         struct btrfs_trans_handle *trans;
7092         struct btrfs_path *path;
7093         int ret;
7094         struct extent_buffer *leaf;
7095         struct btrfs_root *root = BTRFS_I(inode)->root;
7096         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
7097         struct btrfs_file_extent_item *fi;
7098         struct btrfs_key key;
7099         u64 disk_bytenr;
7100         u64 backref_offset;
7101         u64 extent_end;
7102         u64 num_bytes;
7103         int slot;
7104         int found_type;
7105         bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);
7106
7107         path = btrfs_alloc_path();
7108         if (!path)
7109                 return -ENOMEM;
7110
7111         ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
7112                                        offset, 0);
7113         if (ret < 0)
7114                 goto out;
7115
7116         slot = path->slots[0];
7117         if (ret == 1) {
7118                 if (slot == 0) {
7119                         /* can't find the item, must cow */
7120                         ret = 0;
7121                         goto out;
7122                 }
7123                 slot--;
7124         }
7125         ret = 0;
7126         leaf = path->nodes[0];
7127         btrfs_item_key_to_cpu(leaf, &key, slot);
7128         if (key.objectid != btrfs_ino(inode) ||
7129             key.type != BTRFS_EXTENT_DATA_KEY) {
7130                 /* not our file or wrong item type, must cow */
7131                 goto out;
7132         }
7133
7134         if (key.offset > offset) {
7135                 /* Wrong offset, must cow */
7136                 goto out;
7137         }
7138
7139         fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
7140         found_type = btrfs_file_extent_type(leaf, fi);
7141         if (found_type != BTRFS_FILE_EXTENT_REG &&
7142             found_type != BTRFS_FILE_EXTENT_PREALLOC) {
7143                 /* not a regular extent, must cow */
7144                 goto out;
7145         }
7146
7147         if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
7148                 goto out;
7149
7150         extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
7151         if (extent_end <= offset)
7152                 goto out;
7153
7154         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7155         if (disk_bytenr == 0)
7156                 goto out;
7157
7158         if (btrfs_file_extent_compression(leaf, fi) ||
7159             btrfs_file_extent_encryption(leaf, fi) ||
7160             btrfs_file_extent_other_encoding(leaf, fi))
7161                 goto out;
7162
7163         backref_offset = btrfs_file_extent_offset(leaf, fi);
7164
7165         if (orig_start) {
7166                 *orig_start = key.offset - backref_offset;
7167                 *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
7168                 *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
7169         }
7170
7171         if (btrfs_extent_readonly(root, disk_bytenr))
7172                 goto out;
7173
7174         num_bytes = min(offset + *len, extent_end) - offset;
7175         if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
7176                 u64 range_end;
7177
7178                 range_end = round_up(offset + num_bytes, root->sectorsize) - 1;
7179                 ret = test_range_bit(io_tree, offset, range_end,
7180                                      EXTENT_DELALLOC, 0, NULL);
7181                 if (ret) {
7182                         ret = -EAGAIN;
7183                         goto out;
7184                 }
7185         }
7186
7187         btrfs_release_path(path);
7188
7189         /*
7190          * look for other files referencing this extent; if we
7191          * find any we must cow
7192          */
7193         trans = btrfs_join_transaction(root);
7194         if (IS_ERR(trans)) {
7195                 ret = 0;
7196                 goto out;
7197         }
7198
7199         ret = btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
7200                                     key.offset - backref_offset, disk_bytenr);
7201         btrfs_end_transaction(trans, root);
7202         if (ret) {
7203                 ret = 0;
7204                 goto out;
7205         }
7206
7207         /*
7208          * adjust disk_bytenr and num_bytes to cover just the bytes
7209          * in this extent we are about to write.  If there
7210          * are any csums in that range we have to cow in order
7211          * to keep the csums correct
7212          */
7213         disk_bytenr += backref_offset;
7214         disk_bytenr += offset - key.offset;
7215         if (csum_exist_in_range(root, disk_bytenr, num_bytes))
7216                 goto out;
7217         /*
7218          * all of the above have passed, it is safe to overwrite this extent
7219          * without cow
7220          */
7221         *len = num_bytes;
7222         ret = 1;
7223 out:
7224         btrfs_free_path(path);
7225         return ret;
7226 }
7227
7228 bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
7229 {
7230         struct radix_tree_root *root = &inode->i_mapping->page_tree;
7231         int found = false;
7232         void **pagep = NULL;
7233         struct page *page = NULL;
7234         int start_idx;
7235         int end_idx;
7236
7237         start_idx = start >> PAGE_CACHE_SHIFT;
7238
7239         /*
7240          * end is the last byte in the last page.  end == start is legal
7241          */
7242         end_idx = end >> PAGE_CACHE_SHIFT;
7243
7244         rcu_read_lock();
7245
7246         /* Most of the code in this while loop is lifted from
7247          * find_get_page.  It's been modified to begin searching from a
7248          * page and return just the first page found in that range.  If the
7249          * found idx is less than or equal to the end idx then we know that
7250          * a page exists.  If no pages are found or if those pages are
7251          * outside of the range then we're fine (yay!) */
7252         while (page == NULL &&
7253                radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) {
7254                 page = radix_tree_deref_slot(pagep);
7255                 if (unlikely(!page))
7256                         break;
7257
7258                 if (radix_tree_exception(page)) {
7259                         if (radix_tree_deref_retry(page)) {
7260                                 page = NULL;
7261                                 continue;
7262                         }
7263                         /*
7264                          * Otherwise, shmem/tmpfs must be storing a swap entry
7265                          * here as an exceptional entry: so return it without
7266                          * attempting to raise page count.
7267                          */
7268                         page = NULL;
7269                         break; /* TODO: Is this relevant for this use case? */
7270                 }
7271
7272                 if (!page_cache_get_speculative(page)) {
7273                         page = NULL;
7274                         continue;
7275                 }
7276
7277                 /*
7278                  * Has the page moved?
7279                  * This is part of the lockless pagecache protocol. See
7280                  * include/linux/pagemap.h for details.
7281                  */
7282                 if (unlikely(page != *pagep)) {
7283                         page_cache_release(page);
7284                         page = NULL;
7285                 }
7286         }
7287
7288         if (page) {
7289                 if (page->index <= end_idx)
7290                         found = true;
7291                 page_cache_release(page);
7292         }
7293
7294         rcu_read_unlock();
7295         return found;
7296 }
7297
7298 static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7299                               struct extent_state **cached_state, int writing)
7300 {
7301         struct btrfs_ordered_extent *ordered;
7302         int ret = 0;
7303
7304         while (1) {
7305                 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7306                                  0, cached_state);
7307                 /*
7308                  * We're concerned with the entire range that we're going to be
7309                  * doing DIO to, so we need to make sure there are no
7310                  * ordered extents in this range.
7311                  */
7312                 ordered = btrfs_lookup_ordered_range(inode, lockstart,
7313                                                      lockend - lockstart + 1);
7314
7315                 /*
7316                  * We need to make sure there are no buffered pages in this
7317                  * range either; we could have raced between the invalidate in
7318                  * generic_file_direct_write and locking the extent.  The
7319                  * invalidate needs to happen so that reads after a write do not
7320                  * get stale data.
7321                  */
7322                 if (!ordered &&
7323                     (!writing ||
7324                      !btrfs_page_exists_in_range(inode, lockstart, lockend)))
7325                         break;
7326
7327                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7328                                      cached_state, GFP_NOFS);
7329
7330                 if (ordered) {
7331                         btrfs_start_ordered_extent(inode, ordered, 1);
7332                         btrfs_put_ordered_extent(ordered);
7333                 } else {
7334                         /* Screw you mmap */
7335                         ret = btrfs_fdatawrite_range(inode, lockstart, lockend);
7336                         if (ret)
7337                                 break;
7338                         ret = filemap_fdatawait_range(inode->i_mapping,
7339                                                       lockstart,
7340                                                       lockend);
7341                         if (ret)
7342                                 break;
7343
7344                         /*
7345                          * If we found a page that couldn't be invalidated just
7346                          * fall back to buffered.
7347                          */
7348                         ret = invalidate_inode_pages2_range(inode->i_mapping,
7349                                         lockstart >> PAGE_CACHE_SHIFT,
7350                                         lockend >> PAGE_CACHE_SHIFT);
7351                         if (ret)
7352                                 break;
7353                 }
7354
7355                 cond_resched();
7356         }
7357
7358         return ret;
7359 }
7360
7361 static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
7362                                            u64 len, u64 orig_start,
7363                                            u64 block_start, u64 block_len,
7364                                            u64 orig_block_len, u64 ram_bytes,
7365                                            int type)
7366 {
7367         struct extent_map_tree *em_tree;
7368         struct extent_map *em;
7369         struct btrfs_root *root = BTRFS_I(inode)->root;
7370         int ret;
7371
7372         em_tree = &BTRFS_I(inode)->extent_tree;
7373         em = alloc_extent_map();
7374         if (!em)
7375                 return ERR_PTR(-ENOMEM);
7376
7377         em->start = start;
7378         em->orig_start = orig_start;
7379         em->mod_start = start;
7380         em->mod_len = len;
7381         em->len = len;
7382         em->block_len = block_len;
7383         em->block_start = block_start;
7384         em->bdev = root->fs_info->fs_devices->latest_bdev;
7385         em->orig_block_len = orig_block_len;
7386         em->ram_bytes = ram_bytes;
7387         em->generation = -1;
7388         set_bit(EXTENT_FLAG_PINNED, &em->flags);
7389         if (type == BTRFS_ORDERED_PREALLOC)
7390                 set_bit(EXTENT_FLAG_FILLING, &em->flags);
7391
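        /*
         * Keep dropping any cached extent maps in this range until ours can
         * be inserted; add_extent_mapping() returns -EEXIST as long as an
         * overlapping (possibly stale) mapping is still present.
         */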
7392         do {
7393                 btrfs_drop_extent_cache(inode, em->start,
7394                                 em->start + em->len - 1, 0);
7395                 write_lock(&em_tree->lock);
7396                 ret = add_extent_mapping(em_tree, em, 1);
7397                 write_unlock(&em_tree->lock);
7398         } while (ret == -EEXIST);
7399
7400         if (ret) {
7401                 free_extent_map(em);
7402                 return ERR_PTR(ret);
7403         }
7404
7405         return em;
7406 }
7407
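/*
 * DIO space accounting handed to btrfs_get_blocks_direct() via
 * current->journal_info: outstanding_extents is how many reserved extent
 * items this DIO may still consume, and reserve is the number of reserved
 * data bytes not yet mapped to blocks.
 */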
7408 struct btrfs_dio_data {
7409         u64 outstanding_extents;
7410         u64 reserve;
7411 };
7412
7413 static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
7414                                    struct buffer_head *bh_result, int create)
7415 {
7416         struct extent_map *em;
7417         struct btrfs_root *root = BTRFS_I(inode)->root;
7418         struct extent_state *cached_state = NULL;
7419         struct btrfs_dio_data *dio_data = NULL;
7420         u64 start = iblock << inode->i_blkbits;
7421         u64 lockstart, lockend;
7422         u64 len = bh_result->b_size;
7423         int unlock_bits = EXTENT_LOCKED;
7424         int ret = 0;
7425
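        /*
         * iblock is in units of the inode's block size, so the byte offset
         * being mapped is iblock << i_blkbits (e.g. with 4K blocks,
         * i_blkbits == 12, iblock 10 maps to byte offset 40960).  The
         * [lockstart, lockend] range below is in bytes.
         */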
7426         if (create)
7427                 unlock_bits |= EXTENT_DIRTY;
7428         else
7429                 len = min_t(u64, len, root->sectorsize);
7430
7431         lockstart = start;
7432         lockend = start + len - 1;
7433
7434         if (current->journal_info) {
7435                 /*
7436                  * Need to pull our outstanding extents and set journal_info to NULL so
7437                  * that anything that needs to check if there's a transaction doesn't get
7438                  * confused.
7439                  */
7440                 dio_data = current->journal_info;
7441                 current->journal_info = NULL;
7442         }
7443
7444         /*
7445          * If this errors out it's because we couldn't invalidate pagecache for
7446          * this range and we need to fall back to buffered.
7447          */
7448         if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
7449                 return -ENOTBLK;
7450
7451         em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
7452         if (IS_ERR(em)) {
7453                 ret = PTR_ERR(em);
7454                 goto unlock_err;
7455         }
7456
7457         /*
7458          * Ok, for INLINE and COMPRESSED extents we need to fall back to buffered
7459          * IO.  INLINE is special, and we could probably kludge it in here, but
7460          * it's still buffered so for safety let's just fall back to the generic
7461          * buffered path.
7462          *
7463          * For COMPRESSED we _have_ to read the entire extent in so we can
7464          * decompress it, so there will be buffering required no matter what we
7465          * do, so go ahead and fall back to buffered.
7466          *
7467          * We return -ENOTBLK because that's what makes DIO go ahead and go back
7468          * to buffered IO.  Don't blame me, this is the price we pay for using
7469          * the generic code.
7470          */
7471         if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
7472             em->block_start == EXTENT_MAP_INLINE) {
7473                 free_extent_map(em);
7474                 ret = -ENOTBLK;
7475                 goto unlock_err;
7476         }
7477
7478         /* Just a good old-fashioned hole, return */
7479         if (!create && (em->block_start == EXTENT_MAP_HOLE ||
7480                         test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
7481                 free_extent_map(em);
7482                 goto unlock_err;
7483         }
7484
7485         /*
7486          * We don't allocate a new extent in the following cases
7487          *
7488          * 1) The inode is marked as NODATACOW.  In this case we'll just use the
7489          * existing extent.
7490          * 2) The extent is marked as PREALLOC.  We're good to go here and can
7491          * just use the extent.
7492          *
7493          */
7494         if (!create) {
7495                 len = min(len, em->len - (start - em->start));
7496                 lockstart = start + len;
7497                 goto unlock;
7498         }
7499
7500         if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
7501             ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
7502              em->block_start != EXTENT_MAP_HOLE)) {
7503                 int type;
7504                 u64 block_start, orig_start, orig_block_len, ram_bytes;
7505
7506                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7507                         type = BTRFS_ORDERED_PREALLOC;
7508                 else
7509                         type = BTRFS_ORDERED_NOCOW;
7510                 len = min(len, em->len - (start - em->start));
7511                 block_start = em->block_start + (start - em->start);
7512
7513                 if (can_nocow_extent(inode, start, &len, &orig_start,
7514                                      &orig_block_len, &ram_bytes) == 1) {
7515                         if (type == BTRFS_ORDERED_PREALLOC) {
7516                                 free_extent_map(em);
7517                                 em = create_pinned_em(inode, start, len,
7518                                                        orig_start,
7519                                                        block_start, len,
7520                                                        orig_block_len,
7521                                                        ram_bytes, type);
7522                                 if (IS_ERR(em)) {
7523                                         ret = PTR_ERR(em);
7524                                         goto unlock_err;
7525                                 }
7526                         }
7527
7528                         ret = btrfs_add_ordered_extent_dio(inode, start,
7529                                            block_start, len, len, type);
7530                         if (ret) {
7531                                 free_extent_map(em);
7532                                 goto unlock_err;
7533                         }
7534                         goto unlock;
7535                 }
7536         }
7537
7538         /*
7539          * this will cow the extent, reset the len in case we changed
7540          * it above
7541          */
7542         len = bh_result->b_size;
7543         free_extent_map(em);
7544         em = btrfs_new_extent_direct(inode, start, len);
7545         if (IS_ERR(em)) {
7546                 ret = PTR_ERR(em);
7547                 goto unlock_err;
7548         }
7549         len = min(len, em->len - (start - em->start));
7550 unlock:
7551         bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
7552                 inode->i_blkbits;
7553         bh_result->b_size = len;
7554         bh_result->b_bdev = em->bdev;
7555         set_buffer_mapped(bh_result);
7556         if (create) {
7557                 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
7558                         set_buffer_new(bh_result);
7559
7560                 /*
7561                  * Need to update the i_size under the extent lock so buffered
7562                  * readers will get the updated i_size when we unlock.
7563                  */
7564                 if (start + len > i_size_read(inode))
7565                         i_size_write(inode, start + len);
7566
7567                 /*
7568                  * If we have an outstanding_extents count still set then we're
7569                  * within our reservation; otherwise we need to adjust our inode
7570                  * counter appropriately.
7571                  */
7572                 if (dio_data->outstanding_extents) {
7573                         (dio_data->outstanding_extents)--;
7574                 } else {
7575                         spin_lock(&BTRFS_I(inode)->lock);
7576                         BTRFS_I(inode)->outstanding_extents++;
7577                         spin_unlock(&BTRFS_I(inode)->lock);
7578                 }
7579
7580                 btrfs_free_reserved_data_space(inode, len);
7581                 WARN_ON(dio_data->reserve < len);
7582                 dio_data->reserve -= len;
7583                 current->journal_info = dio_data;
7584         }
7585
7586         /*
7587          * In the case of write we need to clear and unlock the entire range;
7588          * in the case of read we need to unlock only the end area that we
7589          * aren't using if there is any leftover space.
7590          */
7591         if (lockstart < lockend) {
7592                 clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
7593                                  lockend, unlock_bits, 1, 0,
7594                                  &cached_state, GFP_NOFS);
7595         } else {
7596                 free_extent_state(cached_state);
7597         }
7598
7599         free_extent_map(em);
7600
7601         return 0;
7602
7603 unlock_err:
7604         clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
7605                          unlock_bits, 1, 0, &cached_state, GFP_NOFS);
7606         if (dio_data)
7607                 current->journal_info = dio_data;
7608         return ret;
7609 }
7610
7611 static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
7612                                         int rw, int mirror_num)
7613 {
7614         struct btrfs_root *root = BTRFS_I(inode)->root;
7615         int ret;
7616
7617         BUG_ON(rw & REQ_WRITE);
7618
7619         bio_get(bio);
7620
7621         ret = btrfs_bio_wq_end_io(root->fs_info, bio,
7622                                   BTRFS_WQ_ENDIO_DIO_REPAIR);
7623         if (ret)
7624                 goto err;
7625
7626         ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
7627 err:
7628         bio_put(bio);
7629         return ret;
7630 }
7631
7632 static int btrfs_check_dio_repairable(struct inode *inode,
7633                                       struct bio *failed_bio,
7634                                       struct io_failure_record *failrec,
7635                                       int failed_mirror)
7636 {
7637         int num_copies;
7638
7639         num_copies = btrfs_num_copies(BTRFS_I(inode)->root->fs_info,
7640                                       failrec->logical, failrec->len);
7641         if (num_copies == 1) {
7642                 /*
7643                  * we only have a single copy of the data, so don't bother with
7644                  * all the retry and error correction code that follows. no
7645                  * matter what the error is, it is very likely to persist.
7646                  */
7647                 pr_debug("Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d\n",
7648                          num_copies, failrec->this_mirror, failed_mirror);
7649                 return 0;
7650         }
7651
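        /*
         * Try the next mirror, skipping the one that just failed.  Once
         * this_mirror has walked past num_copies there is nothing left to
         * read from, so give up.
         */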
7652         failrec->failed_mirror = failed_mirror;
7653         failrec->this_mirror++;
7654         if (failrec->this_mirror == failed_mirror)
7655                 failrec->this_mirror++;
7656
7657         if (failrec->this_mirror > num_copies) {
7658                 pr_debug("Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d\n",
7659                          num_copies, failrec->this_mirror, failed_mirror);
7660                 return 0;
7661         }
7662
7663         return 1;
7664 }
7665
7666 static int dio_read_error(struct inode *inode, struct bio *failed_bio,
7667                           struct page *page, u64 start, u64 end,
7668                           int failed_mirror, bio_end_io_t *repair_endio,
7669                           void *repair_arg)
7670 {
7671         struct io_failure_record *failrec;
7672         struct bio *bio;
7673         int isector;
7674         int read_mode;
7675         int ret;
7676
7677         BUG_ON(failed_bio->bi_rw & REQ_WRITE);
7678
7679         ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
7680         if (ret)
7681                 return ret;
7682
7683         ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
7684                                          failed_mirror);
7685         if (!ret) {
7686                 free_io_failure(inode, failrec);
7687                 return -EIO;
7688         }
7689
7690         if (failed_bio->bi_vcnt > 1)
7691                 read_mode = READ_SYNC | REQ_FAILFAST_DEV;
7692         else
7693                 read_mode = READ_SYNC;
7694
7695         isector = start - btrfs_io_bio(failed_bio)->logical;
7696         isector >>= inode->i_sb->s_blocksize_bits;
7697         bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
7698                                       0, isector, repair_endio, repair_arg);
7699         if (!bio) {
7700                 free_io_failure(inode, failrec);
7701                 return -EIO;
7702         }
7703
7704         btrfs_debug(BTRFS_I(inode)->root->fs_info,
7705                     "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n",
7706                     read_mode, failrec->this_mirror, failrec->in_validation);
7707
7708         ret = submit_dio_repair_bio(inode, bio, read_mode,
7709                                     failrec->this_mirror);
7710         if (ret) {
7711                 free_io_failure(inode, failrec);
7712                 bio_put(bio);
7713         }
7714
7715         return ret;
7716 }
7717
7718 struct btrfs_retry_complete {
7719         struct completion done;
7720         struct inode *inode;
7721         u64 start;
7722         int uptodate;
7723 };
7724
7725 static void btrfs_retry_endio_nocsum(struct bio *bio, int err)
7726 {
7727         struct btrfs_retry_complete *done = bio->bi_private;
7728         struct bio_vec *bvec;
7729         int i;
7730
7731         if (err)
7732                 goto end;
7733
7734         done->uptodate = 1;
7735         bio_for_each_segment_all(bvec, bio, i)
7736                 clean_io_failure(done->inode, done->start, bvec->bv_page, 0);
7737 end:
7738         complete(&done->done);
7739         bio_put(bio);
7740 }
7741
7742 static int __btrfs_correct_data_nocsum(struct inode *inode,
7743                                        struct btrfs_io_bio *io_bio)
7744 {
7745         struct bio_vec *bvec;
7746         struct btrfs_retry_complete done;
7747         u64 start;
7748         int i;
7749         int ret;
7750
7751         start = io_bio->logical;
7752         done.inode = inode;
7753
7754         bio_for_each_segment_all(bvec, &io_bio->bio, i) {
7755 try_again:
7756                 done.uptodate = 0;
7757                 done.start = start;
7758                 init_completion(&done.done);
7759
7760                 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
7761                                      start + bvec->bv_len - 1,
7762                                      io_bio->mirror_num,
7763                                      btrfs_retry_endio_nocsum, &done);
7764                 if (ret)
7765                         return ret;
7766
7767                 wait_for_completion(&done.done);
7768
7769                 if (!done.uptodate) {
7770                         /* We might have another mirror, so try again */
7771                         goto try_again;
7772                 }
7773
7774                 start += bvec->bv_len;
7775         }
7776
7777         return 0;
7778 }
7779
7780 static void btrfs_retry_endio(struct bio *bio, int err)
7781 {
7782         struct btrfs_retry_complete *done = bio->bi_private;
7783         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7784         struct bio_vec *bvec;
7785         int uptodate;
7786         int ret;
7787         int i;
7788
7789         if (err)
7790                 goto end;
7791
7792         uptodate = 1;
7793         bio_for_each_segment_all(bvec, bio, i) {
7794                 ret = __readpage_endio_check(done->inode, io_bio, i,
7795                                              bvec->bv_page, 0,
7796                                              done->start, bvec->bv_len);
7797                 if (!ret)
7798                         clean_io_failure(done->inode, done->start,
7799                                          bvec->bv_page, 0);
7800                 else
7801                         uptodate = 0;
7802         }
7803
7804         done->uptodate = uptodate;
7805 end:
7806         complete(&done->done);
7807         bio_put(bio);
7808 }
7809
7810 static int __btrfs_subio_endio_read(struct inode *inode,
7811                                     struct btrfs_io_bio *io_bio, int err)
7812 {
7813         struct bio_vec *bvec;
7814         struct btrfs_retry_complete done;
7815         u64 start;
7816         u64 offset = 0;
7817         int i;
7818         int ret;
7819
7820         err = 0;
7821         start = io_bio->logical;
7822         done.inode = inode;
7823
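        /*
         * Verify the csum of every page in the bio.  Any page that fails is
         * resubmitted as a synchronous repair read against another mirror,
         * retrying until a mirror passes the check or we run out of copies.
         */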
7824         bio_for_each_segment_all(bvec, &io_bio->bio, i) {
7825                 ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
7826                                              0, start, bvec->bv_len);
7827                 if (likely(!ret))
7828                         goto next;
7829 try_again:
7830                 done.uptodate = 0;
7831                 done.start = start;
7832                 init_completion(&done.done);
7833
7834                 ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
7835                                      start + bvec->bv_len - 1,
7836                                      io_bio->mirror_num,
7837                                      btrfs_retry_endio, &done);
7838                 if (ret) {
7839                         err = ret;
7840                         goto next;
7841                 }
7842
7843                 wait_for_completion(&done.done);
7844
7845                 if (!done.uptodate) {
7846                         /* We might have another mirror, so try again */
7847                         goto try_again;
7848                 }
7849 next:
7850                 offset += bvec->bv_len;
7851                 start += bvec->bv_len;
7852         }
7853
7854         return err;
7855 }
7856
7857 static int btrfs_subio_endio_read(struct inode *inode,
7858                                   struct btrfs_io_bio *io_bio, int err)
7859 {
7860         bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
7861
7862         if (skip_csum) {
7863                 if (unlikely(err))
7864                         return __btrfs_correct_data_nocsum(inode, io_bio);
7865                 else
7866                         return 0;
7867         } else {
7868                 return __btrfs_subio_endio_read(inode, io_bio, err);
7869         }
7870 }
7871
7872 static void btrfs_endio_direct_read(struct bio *bio, int err)
7873 {
7874         struct btrfs_dio_private *dip = bio->bi_private;
7875         struct inode *inode = dip->inode;
7876         struct bio *dio_bio;
7877         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
7878
7879         if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
7880                 err = btrfs_subio_endio_read(inode, io_bio, err);
7881
7882         unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
7883                       dip->logical_offset + dip->bytes - 1);
7884         dio_bio = dip->dio_bio;
7885
7886         kfree(dip);
7887
7888         /* If we had a csum failure make sure to clear the uptodate flag */
7889         if (err)
7890                 clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
7891         dio_end_io(dio_bio, err);
7892
7893         if (io_bio->end_io)
7894                 io_bio->end_io(io_bio, err);
7895         bio_put(bio);
7896 }
7897
7898 static void btrfs_endio_direct_write(struct bio *bio, int err)
7899 {
7900         struct btrfs_dio_private *dip = bio->bi_private;
7901         struct inode *inode = dip->inode;
7902         struct btrfs_root *root = BTRFS_I(inode)->root;
7903         struct btrfs_ordered_extent *ordered = NULL;
7904         u64 ordered_offset = dip->logical_offset;
7905         u64 ordered_bytes = dip->bytes;
7906         struct bio *dio_bio;
7907         int ret;
7908
7909 again:
7910         ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
7911                                                    &ordered_offset,
7912                                                    ordered_bytes, !err);
7913         if (!ret)
7914                 goto out_test;
7915
7916         btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
7917                         finish_ordered_fn, NULL, NULL);
7918         btrfs_queue_work(root->fs_info->endio_write_workers,
7919                          &ordered->work);
7920 out_test:
7921         /*
7922          * our bio might span multiple ordered extents.  If we haven't
7923          * completed the accounting for the whole dio, go back and try again
7924          */
7925         if (ordered_offset < dip->logical_offset + dip->bytes) {
7926                 ordered_bytes = dip->logical_offset + dip->bytes -
7927                         ordered_offset;
7928                 ordered = NULL;
7929                 goto again;
7930         }
7931         dio_bio = dip->dio_bio;
7932
7933         kfree(dip);
7934
7935         /* If we had an error make sure to clear the uptodate flag */
7936         if (err)
7937                 clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
7938         dio_end_io(dio_bio, err);
7939         bio_put(bio);
7940 }
7941
7942 static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
7943                                     struct bio *bio, int mirror_num,
7944                                     unsigned long bio_flags, u64 offset)
7945 {
7946         int ret;
7947         struct btrfs_root *root = BTRFS_I(inode)->root;
7948         ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
7949         BUG_ON(ret); /* -ENOMEM */
7950         return 0;
7951 }
7952
7953 static void btrfs_end_dio_bio(struct bio *bio, int err)
7954 {
7955         struct btrfs_dio_private *dip = bio->bi_private;
7956
7957         if (err)
7958                 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
7959                            "direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d",
7960                            btrfs_ino(dip->inode), bio->bi_rw,
7961                            (unsigned long long)bio->bi_iter.bi_sector,
7962                            bio->bi_iter.bi_size, err);
7963
7964         if (dip->subio_endio)
7965                 err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
7966
7967         if (err) {
7968                 dip->errors = 1;
7969
7970                 /*
7971                  * before the atomic variable goes to zero, we must make sure
7972                  * dip->errors is perceived to be set.
7973                  */
7974                 smp_mb__before_atomic();
7975         }
7976
7977         /* if there are more bios still pending for this dio, just exit */
7978         if (!atomic_dec_and_test(&dip->pending_bios))
7979                 goto out;
7980
7981         if (dip->errors) {
7982                 bio_io_error(dip->orig_bio);
7983         } else {
7984                 set_bit(BIO_UPTODATE, &dip->dio_bio->bi_flags);
7985                 bio_endio(dip->orig_bio, 0);
7986         }
7987 out:
7988         bio_put(bio);
7989 }
7990
7991 static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
7992                                        u64 first_sector, gfp_t gfp_flags)
7993 {
7994         int nr_vecs = bio_get_nr_vecs(bdev);
7995         struct bio *bio;
7996         bio = btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
7997         if (bio)
7998                 bio_associate_current(bio);
7999         return bio;
8000 }
8001
8002 static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
8003                                                  struct inode *inode,
8004                                                  struct btrfs_dio_private *dip,
8005                                                  struct bio *bio,
8006                                                  u64 file_offset)
8007 {
8008         struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
8009         struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
8010         int ret;
8011
8012         /*
8013          * We load all the csum data we need when we submit
8014          * the first bio to reduce csum tree searches and
8015          * contention.
8016          */
8017         if (dip->logical_offset == file_offset) {
8018                 ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio,
8019                                                 file_offset);
8020                 if (ret)
8021                         return ret;
8022         }
8023
8024         if (bio == dip->orig_bio)
8025                 return 0;
8026
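        /*
         * Split bios reuse the csum array looked up for the original bio:
         * point this bio's csum at the entry for its first block, using the
         * file offset relative to the dio converted to a block index.
         */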
8027         file_offset -= dip->logical_offset;
8028         file_offset >>= inode->i_sb->s_blocksize_bits;
8029         io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
8030
8031         return 0;
8032 }
8033
8034 static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
8035                                          int rw, u64 file_offset, int skip_sum,
8036                                          int async_submit)
8037 {
8038         struct btrfs_dio_private *dip = bio->bi_private;
8039         int write = rw & REQ_WRITE;
8040         struct btrfs_root *root = BTRFS_I(inode)->root;
8041         int ret;
8042
8043         if (async_submit)
8044                 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
8045
8046         bio_get(bio);
8047
8048         if (!write) {
8049                 ret = btrfs_bio_wq_end_io(root->fs_info, bio,
8050                                 BTRFS_WQ_ENDIO_DATA);
8051                 if (ret)
8052                         goto err;
8053         }
8054
8055         if (skip_sum)
8056                 goto map;
8057
8058         if (write && async_submit) {
8059                 ret = btrfs_wq_submit_bio(root->fs_info,
8060                                    inode, rw, bio, 0, 0,
8061                                    file_offset,
8062                                    __btrfs_submit_bio_start_direct_io,
8063                                    __btrfs_submit_bio_done);
8064                 goto err;
8065         } else if (write) {
8066                 /*
8067                  * If we aren't doing async submit, calculate the csum of the
8068                  * bio now.
8069                  */
8070                 ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
8071                 if (ret)
8072                         goto err;
8073         } else {
8074                 ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio,
8075                                                      file_offset);
8076                 if (ret)
8077                         goto err;
8078         }
8079 map:
8080         ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
8081 err:
8082         bio_put(bio);
8083         return ret;
8084 }
8085
8086 static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
8087                                     int skip_sum)
8088 {
8089         struct inode *inode = dip->inode;
8090         struct btrfs_root *root = BTRFS_I(inode)->root;
8091         struct bio *bio;
8092         struct bio *orig_bio = dip->orig_bio;
8093         struct bio_vec *bvec = orig_bio->bi_io_vec;
8094         u64 start_sector = orig_bio->bi_iter.bi_sector;
8095         u64 file_offset = dip->logical_offset;
8096         u64 submit_len = 0;
8097         u64 map_length;
8098         int nr_pages = 0;
8099         int ret;
8100         int async_submit = 0;
8101
8102         map_length = orig_bio->bi_iter.bi_size;
8103         ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
8104                               &map_length, NULL, 0);
8105         if (ret)
8106                 return -EIO;
8107
8108         if (map_length >= orig_bio->bi_iter.bi_size) {
8109                 bio = orig_bio;
8110                 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
8111                 goto submit;
8112         }
8113
8114         /* async crcs make it difficult to collect full stripe writes. */
8115         if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK)
8116                 async_submit = 0;
8117         else
8118                 async_submit = 1;
8119
8120         bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
8121         if (!bio)
8122                 return -ENOMEM;
8123
8124         bio->bi_private = dip;
8125         bio->bi_end_io = btrfs_end_dio_bio;
8126         btrfs_io_bio(bio)->logical = file_offset;
8127         atomic_inc(&dip->pending_bios);
8128
8129         while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
8130                 if (map_length < submit_len + bvec->bv_len ||
8131                     bio_add_page(bio, bvec->bv_page, bvec->bv_len,
8132                                  bvec->bv_offset) < bvec->bv_len) {
8133                         /*
8134                          * Increment the count before we submit the bio so
8135                          * the end IO handler can't fire before the count is
8136                          * raised; otherwise the dip might get freed before
8137                          * we're done setting it up.
8138                          */
8139                         atomic_inc(&dip->pending_bios);
8140                         ret = __btrfs_submit_dio_bio(bio, inode, rw,
8141                                                      file_offset, skip_sum,
8142                                                      async_submit);
8143                         if (ret) {
8144                                 bio_put(bio);
8145                                 atomic_dec(&dip->pending_bios);
8146                                 goto out_err;
8147                         }
8148
8149                         start_sector += submit_len >> 9;
8150                         file_offset += submit_len;
8151
8152                         submit_len = 0;
8153                         nr_pages = 0;
8154
8155                         bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
8156                                                   start_sector, GFP_NOFS);
8157                         if (!bio)
8158                                 goto out_err;
8159                         bio->bi_private = dip;
8160                         bio->bi_end_io = btrfs_end_dio_bio;
8161                         btrfs_io_bio(bio)->logical = file_offset;
8162
8163                         map_length = orig_bio->bi_iter.bi_size;
8164                         ret = btrfs_map_block(root->fs_info, rw,
8165                                               start_sector << 9,
8166                                               &map_length, NULL, 0);
8167                         if (ret) {
8168                                 bio_put(bio);
8169                                 goto out_err;
8170                         }
8171                 } else {
8172                         submit_len += bvec->bv_len;
8173                         nr_pages++;
8174                         bvec++;
8175                 }
8176         }
8177
8178 submit:
8179         ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
8180                                      async_submit);
8181         if (!ret)
8182                 return 0;
8183
8184         bio_put(bio);
8185 out_err:
8186         dip->errors = 1;
8187         /*
8188          * Before the atomic variable drops to zero, we must
8189          * make sure dip->errors is perceived to be set.
8190          */
8191         smp_mb__before_atomic();
8192         if (atomic_dec_and_test(&dip->pending_bios))
8193                 bio_io_error(dip->orig_bio);
8194
8195         /* bio_end_io() will handle the error, so we needn't return it */
8196         return 0;
8197 }
8198
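/*
 * Entry point for direct I/O submission.  Clones the dio_bio, sets up the
 * btrfs_dio_private that tracks the request, selects the read or write end_io
 * handlers and hands the work to btrfs_submit_direct_hook().  If setup or
 * submission fails, the ordered extent (writes) or the locked extent range
 * (reads) is cleaned up here instead.
 */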
8199 static void btrfs_submit_direct(int rw, struct bio *dio_bio,
8200                                 struct inode *inode, loff_t file_offset)
8201 {
8202         struct btrfs_dio_private *dip = NULL;
8203         struct bio *io_bio = NULL;
8204         struct btrfs_io_bio *btrfs_bio;
8205         int skip_sum;
8206         int write = rw & REQ_WRITE;
8207         int ret = 0;
8208
8209         skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
8210
8211         io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
8212         if (!io_bio) {
8213                 ret = -ENOMEM;
8214                 goto free_ordered;
8215         }
8216
8217         dip = kzalloc(sizeof(*dip), GFP_NOFS);
8218         if (!dip) {
8219                 ret = -ENOMEM;
8220                 goto free_ordered;
8221         }
8222
8223         dip->private = dio_bio->bi_private;
8224         dip->inode = inode;
8225         dip->logical_offset = file_offset;
8226         dip->bytes = dio_bio->bi_iter.bi_size;
8227         dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
8228         io_bio->bi_private = dip;
8229         dip->orig_bio = io_bio;
8230         dip->dio_bio = dio_bio;
8231         atomic_set(&dip->pending_bios, 0);
8232         btrfs_bio = btrfs_io_bio(io_bio);
8233         btrfs_bio->logical = file_offset;
8234
8235         if (write) {
8236                 io_bio->bi_end_io = btrfs_endio_direct_write;
8237         } else {
8238                 io_bio->bi_end_io = btrfs_endio_direct_read;
8239                 dip->subio_endio = btrfs_subio_endio_read;
8240         }
8241
8242         ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
8243         if (!ret)
8244                 return;
8245
8246         if (btrfs_bio->end_io)
8247                 btrfs_bio->end_io(btrfs_bio, ret);
8248
8249 free_ordered:
8250         /*
8251          * If we arrived here it means we either failed to submit the dip,
8252          * failed to clone the dio_bio, or failed to allocate the dip. If we
8253          * cloned the dio_bio and allocated the dip, we can just call bio_endio
8254          * against our io_bio so that we get proper resource cleanup if we fail
8255          * to submit the dip; otherwise we must do the same as
8256          * btrfs_endio_direct_[write|read] because we can't call these
8257          * callbacks - they require an allocated dip and a clone of dio_bio.
8258          */
8259         if (io_bio && dip) {
8260                 bio_endio(io_bio, ret);
8261                 /*
8262                  * The end io callbacks free our dip, do the final put on io_bio
8263                  * and all the cleanup and final put for dio_bio (through
8264                  * dio_end_io()).
8265                  */
8266                 dip = NULL;
8267                 io_bio = NULL;
8268         } else {
8269                 if (write) {
8270                         struct btrfs_ordered_extent *ordered;
8271
8272                         ordered = btrfs_lookup_ordered_extent(inode,
8273                                                               file_offset);
8274                         set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
8275                         /*
8276                          * Decrements our ref on the ordered extent and removes
8277                          * the ordered extent from the inode's ordered tree,
8278                          * doing all the proper resource cleanup such as for the
8279                          * reserved space and waking up any waiters for this
8280                          * ordered extent (through btrfs_remove_ordered_extent).
8281                          */
8282                         btrfs_finish_ordered_io(ordered);
8283                 } else {
8284                         unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
8285                               file_offset + dio_bio->bi_iter.bi_size - 1);
8286                 }
8287                 clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
8288                 /*
8289                  * Releases and cleans up our dio_bio, no need to bio_put()
8290                  * nor bio_endio()/bio_io_error() against dio_bio.
8291                  */
8292                 dio_end_io(dio_bio, ret);
8293         }
8294         if (io_bio)
8295                 bio_put(io_bio);
8296         kfree(dip);
8297 }
8298
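/*
 * Check whether a direct I/O request is acceptable: the offset and the iovec
 * must be aligned to the sector size, and read iovecs must not contain
 * duplicate iov_base pointers.  Returns 0 if direct I/O may proceed and
 * -EINVAL otherwise, in which case btrfs_direct_IO() returns 0 and the I/O
 * falls back to the buffered path.
 */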
8299 static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
8300                         const struct iov_iter *iter, loff_t offset)
8301 {
8302         int seg;
8303         int i;
8304         unsigned blocksize_mask = root->sectorsize - 1;
8305         ssize_t retval = -EINVAL;
8306
8307         if (offset & blocksize_mask)
8308                 goto out;
8309
8310         if (iov_iter_alignment(iter) & blocksize_mask)
8311                 goto out;
8312
8313         /* If this is a write we don't need any further checks */
8314         if (iov_iter_rw(iter) == WRITE)
8315                 return 0;
8316         /*
8317          * Check to make sure we don't have duplicate iov_base's in this
8318          * iovec; if we do, return EINVAL, otherwise we'll get csum errors
8319          * when reading back.
8320          */
8321         for (seg = 0; seg < iter->nr_segs; seg++) {
8322                 for (i = seg + 1; i < iter->nr_segs; i++) {
8323                         if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
8324                                 goto out;
8325                 }
8326         }
8327         retval = 0;
8328 out:
8329         return retval;
8330 }
8331
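/*
 * ->direct_IO: flush any async (compressed) extents in the range, reserve
 * delalloc space for writes and stash the reservation accounting in
 * current->journal_info, then run the request through __blockdev_direct_IO()
 * with our get_blocks and submit hooks.  Whatever part of a write
 * reservation ends up unused is released afterwards.
 */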
8332 static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
8333                                loff_t offset)
8334 {
8335         struct file *file = iocb->ki_filp;
8336         struct inode *inode = file->f_mapping->host;
8337         struct btrfs_root *root = BTRFS_I(inode)->root;
8338         struct btrfs_dio_data dio_data = { 0 };
8339         size_t count = 0;
8340         int flags = 0;
8341         bool wakeup = true;
8342         bool relock = false;
8343         ssize_t ret;
8344
8345         if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
8346                 return 0;
8347
8348         inode_dio_begin(inode);
8349         smp_mb__after_atomic();
8350
8351         /*
8352          * The generic stuff only does filemap_write_and_wait_range, which
8353          * isn't enough if we've written compressed pages to this area, so
8354          * we need to flush the dirty pages again to make absolutely sure
8355          * that any outstanding dirty pages are on disk.
8356          */
8357         count = iov_iter_count(iter);
8358         if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
8359                      &BTRFS_I(inode)->runtime_flags))
8360                 filemap_fdatawrite_range(inode->i_mapping, offset,
8361                                          offset + count - 1);
8362
8363         if (iov_iter_rw(iter) == WRITE) {
8364                 /*
8365                  * If the write DIO is beyond the EOF, we need to update
8366                  * the isize, which is protected by i_mutex, so we cannot
8367                  * unlock the i_mutex in this case.
8368                  */
8369                 if (offset + count <= inode->i_size) {
8370                         mutex_unlock(&inode->i_mutex);
8371                         relock = true;
8372                 }
8373                 ret = btrfs_delalloc_reserve_space(inode, count);
8374                 if (ret)
8375                         goto out;
8376                 dio_data.outstanding_extents = div64_u64(count +
8377                                                 BTRFS_MAX_EXTENT_SIZE - 1,
8378                                                 BTRFS_MAX_EXTENT_SIZE);
8379
8380                 /*
8381                  * We need to know how many extents we reserved so that we can
8382                  * do the accounting properly if we go over the number we
8383                  * originally calculated.  Abuse current->journal_info for this.
8384                  */
8385                 dio_data.reserve = round_up(count, root->sectorsize);
8386                 current->journal_info = &dio_data;
8387         } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
8388                                      &BTRFS_I(inode)->runtime_flags)) {
8389                 inode_dio_end(inode);
8390                 flags = DIO_LOCKING | DIO_SKIP_HOLES;
8391                 wakeup = false;
8392         }
8393
8394         ret = __blockdev_direct_IO(iocb, inode,
8395                                    BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
8396                                    iter, offset, btrfs_get_blocks_direct, NULL,
8397                                    btrfs_submit_direct, flags);
8398         if (iov_iter_rw(iter) == WRITE) {
8399                 current->journal_info = NULL;
8400                 if (ret < 0 && ret != -EIOCBQUEUED) {
8401                         if (dio_data.reserve)
8402                                 btrfs_delalloc_release_space(inode,
8403                                                         dio_data.reserve);
8404                 } else if (ret >= 0 && (size_t)ret < count)
8405                         btrfs_delalloc_release_space(inode,
8406                                                      count - (size_t)ret);
8407         }
8408 out:
8409         if (wakeup)
8410                 inode_dio_end(inode);
8411         if (relock)
8412                 mutex_lock(&inode->i_mutex);
8413
8414         return ret;
8415 }
8416
8417 #define BTRFS_FIEMAP_FLAGS      (FIEMAP_FLAG_SYNC)
8418
8419 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
8420                 __u64 start, __u64 len)
8421 {
8422         int     ret;
8423
8424         ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
8425         if (ret)
8426                 return ret;
8427
8428         return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
8429 }
8430
8431 int btrfs_readpage(struct file *file, struct page *page)
8432 {
8433         struct extent_io_tree *tree;
8434         tree = &BTRFS_I(page->mapping->host)->io_tree;
8435         return extent_read_full_page(tree, page, btrfs_get_extent, 0);
8436 }
8437
8438 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
8439 {
8440         struct extent_io_tree *tree;
8441
8442
8443         if (current->flags & PF_MEMALLOC) {
8444                 redirty_page_for_writepage(wbc, page);
8445                 unlock_page(page);
8446                 return 0;
8447         }
8448         tree = &BTRFS_I(page->mapping->host)->io_tree;
8449         return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
8450 }
8451
8452 static int btrfs_writepages(struct address_space *mapping,
8453                             struct writeback_control *wbc)
8454 {
8455         struct extent_io_tree *tree;
8456
8457         tree = &BTRFS_I(mapping->host)->io_tree;
8458         return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
8459 }
8460
8461 static int
8462 btrfs_readpages(struct file *file, struct address_space *mapping,
8463                 struct list_head *pages, unsigned nr_pages)
8464 {
8465         struct extent_io_tree *tree;
8466         tree = &BTRFS_I(mapping->host)->io_tree;
8467         return extent_readpages(tree, mapping, pages, nr_pages,
8468                                 btrfs_get_extent);
8469 }
8470 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8471 {
8472         struct extent_io_tree *tree;
8473         struct extent_map_tree *map;
8474         int ret;
8475
8476         tree = &BTRFS_I(page->mapping->host)->io_tree;
8477         map = &BTRFS_I(page->mapping->host)->extent_tree;
8478         ret = try_release_extent_mapping(map, tree, page, gfp_flags);
8479         if (ret == 1) {
8480                 ClearPagePrivate(page);
8481                 set_page_private(page, 0);
8482                 page_cache_release(page);
8483         }
8484         return ret;
8485 }
8486
8487 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
8488 {
8489         if (PageWriteback(page) || PageDirty(page))
8490                 return 0;
8491         return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
8492 }
8493
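/*
 * Invalidate (part of) a page.  We wait for writeback first; a partial
 * invalidation (offset != 0) only tries to release the page, while a full
 * invalidation accounts for any ordered extent covering the page (its IO
 * will never be started now) and clears the extent state bits for the
 * range, unless the inode is already being evicted.
 */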
8494 static void btrfs_invalidatepage(struct page *page, unsigned int offset,
8495                                  unsigned int length)
8496 {
8497         struct inode *inode = page->mapping->host;
8498         struct extent_io_tree *tree;
8499         struct btrfs_ordered_extent *ordered;
8500         struct extent_state *cached_state = NULL;
8501         u64 page_start = page_offset(page);
8502         u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
8503         int inode_evicting = inode->i_state & I_FREEING;
8504
8505         /*
8506          * we have the page locked, so new writeback can't start,
8507          * and the dirty bit won't be cleared while we are here.
8508          *
8509          * Wait for IO on this page so that we can safely clear
8510          * the PagePrivate2 bit and do ordered accounting
8511          */
8512         wait_on_page_writeback(page);
8513
8514         tree = &BTRFS_I(inode)->io_tree;
8515         if (offset) {
8516                 btrfs_releasepage(page, GFP_NOFS);
8517                 return;
8518         }
8519
8520         if (!inode_evicting)
8521                 lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
8522         ordered = btrfs_lookup_ordered_extent(inode, page_start);
8523         if (ordered) {
8524                 /*
8525                  * IO on this page will never be started, so we need
8526                  * to account for any ordered extents now
8527                  */
8528                 if (!inode_evicting)
8529                         clear_extent_bit(tree, page_start, page_end,
8530                                          EXTENT_DIRTY | EXTENT_DELALLOC |
8531                                          EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
8532                                          EXTENT_DEFRAG, 1, 0, &cached_state,
8533                                          GFP_NOFS);
8534                 /*
8535                  * whoever cleared the private bit is responsible
8536                  * for the finish_ordered_io
8537                  */
8538                 if (TestClearPagePrivate2(page)) {
8539                         struct btrfs_ordered_inode_tree *tree;
8540                         u64 new_len;
8541
8542                         tree = &BTRFS_I(inode)->ordered_tree;
8543
8544                         spin_lock_irq(&tree->lock);
8545                         set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
8546                         new_len = page_start - ordered->file_offset;
8547                         if (new_len < ordered->truncated_len)
8548                                 ordered->truncated_len = new_len;
8549                         spin_unlock_irq(&tree->lock);
8550
8551                         if (btrfs_dec_test_ordered_pending(inode, &ordered,
8552                                                            page_start,
8553                                                            PAGE_CACHE_SIZE, 1))
8554                                 btrfs_finish_ordered_io(ordered);
8555                 }
8556                 btrfs_put_ordered_extent(ordered);
8557                 if (!inode_evicting) {
8558                         cached_state = NULL;
8559                         lock_extent_bits(tree, page_start, page_end, 0,
8560                                          &cached_state);
8561                 }
8562         }
8563
8564         if (!inode_evicting) {
8565                 clear_extent_bit(tree, page_start, page_end,
8566                                  EXTENT_LOCKED | EXTENT_DIRTY |
8567                                  EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
8568                                  EXTENT_DEFRAG, 1, 1,
8569                                  &cached_state, GFP_NOFS);
8570
8571                 __btrfs_releasepage(page, GFP_NOFS);
8572         }
8573
8574         ClearPageChecked(page);
8575         if (PagePrivate(page)) {
8576                 ClearPagePrivate(page);
8577                 set_page_private(page, 0);
8578                 page_cache_release(page);
8579         }
8580 }
8581
8582 /*
8583  * btrfs_page_mkwrite() is not allowed to change the file size as it gets
8584  * called from a page fault handler when a page is first dirtied. Hence we must
8585  * be careful to check for EOF conditions here. We set the page up correctly
8586  * for a written page which means we get ENOSPC checking when writing into
8587  * holes and correct delalloc and unwritten extent mapping on filesystems that
8588  * support these features.
8589  *
8590  * We are not allowed to take the i_mutex here so we have to play games to
8591  * protect against truncate races as the page could now be beyond EOF.  Because
8592  * vmtruncate() writes the inode size before removing pages, once we have the
8593  * page lock we can determine safely if the page is beyond EOF. If it is not
8594  * beyond EOF, then the page is guaranteed safe against truncation until we
8595  * unlock the page.
8596  */
8597 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
8598 {
8599         struct page *page = vmf->page;
8600         struct inode *inode = file_inode(vma->vm_file);
8601         struct btrfs_root *root = BTRFS_I(inode)->root;
8602         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
8603         struct btrfs_ordered_extent *ordered;
8604         struct extent_state *cached_state = NULL;
8605         char *kaddr;
8606         unsigned long zero_start;
8607         loff_t size;
8608         int ret;
8609         int reserved = 0;
8610         u64 page_start;
8611         u64 page_end;
8612
8613         sb_start_pagefault(inode->i_sb);
8614         ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
8615         if (!ret) {
8616                 ret = file_update_time(vma->vm_file);
8617                 reserved = 1;
8618         }
8619         if (ret) {
8620                 if (ret == -ENOMEM)
8621                         ret = VM_FAULT_OOM;
8622                 else /* -ENOSPC, -EIO, etc */
8623                         ret = VM_FAULT_SIGBUS;
8624                 if (reserved)
8625                         goto out;
8626                 goto out_noreserve;
8627         }
8628
8629         ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
8630 again:
8631         lock_page(page);
8632         size = i_size_read(inode);
8633         page_start = page_offset(page);
8634         page_end = page_start + PAGE_CACHE_SIZE - 1;
8635
8636         if ((page->mapping != inode->i_mapping) ||
8637             (page_start >= size)) {
8638                 /* page got truncated out from underneath us */
8639                 goto out_unlock;
8640         }
8641         wait_on_page_writeback(page);
8642
8643         lock_extent_bits(io_tree, page_start, page_end, 0, &cached_state);
8644         set_page_extent_mapped(page);
8645
8646         /*
8647          * we can't set the delalloc bits if there are pending ordered
8648          * extents.  Drop our locks and wait for them to finish
8649          */
8650         ordered = btrfs_lookup_ordered_extent(inode, page_start);
8651         if (ordered) {
8652                 unlock_extent_cached(io_tree, page_start, page_end,
8653                                      &cached_state, GFP_NOFS);
8654                 unlock_page(page);
8655                 btrfs_start_ordered_extent(inode, ordered, 1);
8656                 btrfs_put_ordered_extent(ordered);
8657                 goto again;
8658         }
8659
8660         /*
8661          * XXX - page_mkwrite gets called every time the page is dirtied, even
8662          * if it was already dirty, so for space accounting reasons we need to
8663          * clear any delalloc bits for the range we are fixing to save.  There
8664          * is probably a better way to do this, but for now keep consistent with
8665          * prepare_pages in the normal write path.
8666          */
8667         clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
8668                           EXTENT_DIRTY | EXTENT_DELALLOC |
8669                           EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
8670                           0, 0, &cached_state, GFP_NOFS);
8671
8672         ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
8673                                         &cached_state);
8674         if (ret) {
8675                 unlock_extent_cached(io_tree, page_start, page_end,
8676                                      &cached_state, GFP_NOFS);
8677                 ret = VM_FAULT_SIGBUS;
8678                 goto out_unlock;
8679         }
8680         ret = 0;
8681
8682         /* page is wholly or partially inside EOF */
8683         if (page_start + PAGE_CACHE_SIZE > size)
8684                 zero_start = size & ~PAGE_CACHE_MASK;
8685         else
8686                 zero_start = PAGE_CACHE_SIZE;
8687
8688         if (zero_start != PAGE_CACHE_SIZE) {
8689                 kaddr = kmap(page);
8690                 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
8691                 flush_dcache_page(page);
8692                 kunmap(page);
8693         }
8694         ClearPageChecked(page);
8695         set_page_dirty(page);
8696         SetPageUptodate(page);
8697
8698         BTRFS_I(inode)->last_trans = root->fs_info->generation;
8699         BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
8700         BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
8701
8702         unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);
8703
8704 out_unlock:
8705         if (!ret) {
8706                 sb_end_pagefault(inode->i_sb);
8707                 return VM_FAULT_LOCKED;
8708         }
8709         unlock_page(page);
8710 out:
8711         btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
8712 out_noreserve:
8713         sb_end_pagefault(inode->i_sb);
8714         return ret;
8715 }
8716
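/*
 * Truncate the inode's items down to i_size, restarting the transaction and
 * refilling a dedicated block reservation whenever btrfs_truncate_inode_items
 * returns -ENOSPC or -EAGAIN, and drop the orphan item once the truncate has
 * finished (see the long comment below for why the reservations are split).
 */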
8717 static int btrfs_truncate(struct inode *inode)
8718 {
8719         struct btrfs_root *root = BTRFS_I(inode)->root;
8720         struct btrfs_block_rsv *rsv;
8721         int ret = 0;
8722         int err = 0;
8723         struct btrfs_trans_handle *trans;
8724         u64 mask = root->sectorsize - 1;
8725         u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
8726
8727         ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
8728                                        (u64)-1);
8729         if (ret)
8730                 return ret;
8731
8732         /*
8733          * Yes, ladies and gentlemen, this is indeed ugly.  The fact is we have
8734          * 3 things going on here:
8735          *
8736          * 1) We need to reserve space for our orphan item and the space to
8737          * delete our orphan item.  Lord knows we don't want to have a dangling
8738          * orphan item because we didn't reserve space to remove it.
8739          *
8740          * 2) We need to reserve space to update our inode.
8741          *
8742          * 3) We need to have something to cache all the space that is going to
8743          * be freed up by the truncate operation, but also have some slack
8744          * space reserved in case it uses space during the truncate (thank you
8745          * very much snapshotting).
8746          *
8747          * And we need these to all be separate.  The fact is we can use a lot of
8748          * space doing the truncate, and we have no earthly idea how much space
8749          * we will use, so we need the truncate reservation to be separate so it
8750          * doesn't end up using space reserved for updating the inode or
8751          * removing the orphan item.  We also need to be able to stop the
8752          * transaction and start a new one, which means we need to be able to
8753          * update the inode several times, and we have no way of knowing how
8754          * many times that will be, so we can't just reserve 1 item for the
8755          * entirety of the operation, so that has to be done separately as well.
8756          * Then there is the orphan item, which does indeed need to be held on
8757          * to for the whole operation, and we need nobody to touch this reserved
8758          * space except the orphan code.
8759          *
8760          * So that leaves us with:
8761          *
8762          * 1) root->orphan_block_rsv - for the orphan deletion.
8763          * 2) rsv - for the truncate reservation, which we will steal from the
8764          * transaction reservation.
8765          * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
8766          * updating the inode.
8767          */
8768         rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
8769         if (!rsv)
8770                 return -ENOMEM;
8771         rsv->size = min_size;
8772         rsv->failfast = 1;
8773
8774         /*
8775          * 1 for the truncate slack space
8776          * 1 for updating the inode.
8777          */
8778         trans = btrfs_start_transaction(root, 2);
8779         if (IS_ERR(trans)) {
8780                 err = PTR_ERR(trans);
8781                 goto out;
8782         }
8783
8784         /* Migrate the slack space for the truncate to our reserve */
8785         ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
8786                                       min_size);
8787         BUG_ON(ret);
8788
8789         /*
8790          * If we truncate and then write and fsync, we would normally just
8791          * write the extents that changed, which is a problem if we need to
8792          * first truncate that entire inode.  Set this flag so we write out
8793          * all of the extents in the inode to the sync log and are completely
8794          * safe.
8795          */
8796         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
8797         trans->block_rsv = rsv;
8798
8799         while (1) {
8800                 ret = btrfs_truncate_inode_items(trans, root, inode,
8801                                                  inode->i_size,
8802                                                  BTRFS_EXTENT_DATA_KEY);
8803                 if (ret != -ENOSPC && ret != -EAGAIN) {
8804                         err = ret;
8805                         break;
8806                 }
8807
8808                 trans->block_rsv = &root->fs_info->trans_block_rsv;
8809                 ret = btrfs_update_inode(trans, root, inode);
8810                 if (ret) {
8811                         err = ret;
8812                         break;
8813                 }
8814
8815                 btrfs_end_transaction(trans, root);
8816                 btrfs_btree_balance_dirty(root);
8817
8818                 trans = btrfs_start_transaction(root, 2);
8819                 if (IS_ERR(trans)) {
8820                         ret = err = PTR_ERR(trans);
8821                         trans = NULL;
8822                         break;
8823                 }
8824
8825                 ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
8826                                               rsv, min_size);
8827                 BUG_ON(ret);    /* shouldn't happen */
8828                 trans->block_rsv = rsv;
8829         }
8830
8831         if (ret == 0 && inode->i_nlink > 0) {
8832                 trans->block_rsv = root->orphan_block_rsv;
8833                 ret = btrfs_orphan_del(trans, inode);
8834                 if (ret)
8835                         err = ret;
8836         }
8837
8838         if (trans) {
8839                 trans->block_rsv = &root->fs_info->trans_block_rsv;
8840                 ret = btrfs_update_inode(trans, root, inode);
8841                 if (ret && !err)
8842                         err = ret;
8843
8844                 ret = btrfs_end_transaction(trans, root);
8845                 btrfs_btree_balance_dirty(root);
8846         }
8847
8848 out:
8849         btrfs_free_block_rsv(root, rsv);
8850
8851         if (ret && !err)
8852                 err = ret;
8853
8854         return err;
8855 }
8856
8857 /*
8858  * create a new subvolume directory/inode (helper for the ioctl).
8859  */
8860 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
8861                              struct btrfs_root *new_root,
8862                              struct btrfs_root *parent_root,
8863                              u64 new_dirid)
8864 {
8865         struct inode *inode;
8866         int err;
8867         u64 index = 0;
8868
8869         inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
8870                                 new_dirid, new_dirid,
8871                                 S_IFDIR | (~current_umask() & S_IRWXUGO),
8872                                 &index);
8873         if (IS_ERR(inode))
8874                 return PTR_ERR(inode);
8875         inode->i_op = &btrfs_dir_inode_operations;
8876         inode->i_fop = &btrfs_dir_file_operations;
8877
8878         set_nlink(inode, 1);
8879         btrfs_i_size_write(inode, 0);
8880         unlock_new_inode(inode);
8881
8882         err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
8883         if (err)
8884                 btrfs_err(new_root->fs_info,
8885                           "error inheriting subvolume %llu properties: %d",
8886                           new_root->root_key.objectid, err);
8887
8888         err = btrfs_update_inode(trans, new_root, inode);
8889
8890         iput(inode);
8891         return err;
8892 }
8893
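/*
 * Allocate a btrfs_inode from the inode slab and initialise its counters,
 * extent trees, ordered tree, lists and locks, returning the embedded VFS
 * inode.
 */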
8894 struct inode *btrfs_alloc_inode(struct super_block *sb)
8895 {
8896         struct btrfs_inode *ei;
8897         struct inode *inode;
8898
8899         ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
8900         if (!ei)
8901                 return NULL;
8902
8903         ei->root = NULL;
8904         ei->generation = 0;
8905         ei->last_trans = 0;
8906         ei->last_sub_trans = 0;
8907         ei->logged_trans = 0;
8908         ei->delalloc_bytes = 0;
8909         ei->defrag_bytes = 0;
8910         ei->disk_i_size = 0;
8911         ei->flags = 0;
8912         ei->csum_bytes = 0;
8913         ei->index_cnt = (u64)-1;
8914         ei->dir_index = 0;
8915         ei->last_unlink_trans = 0;
8916         ei->last_log_commit = 0;
8917
8918         spin_lock_init(&ei->lock);
8919         ei->outstanding_extents = 0;
8920         ei->reserved_extents = 0;
8921
8922         ei->runtime_flags = 0;
8923         ei->force_compress = BTRFS_COMPRESS_NONE;
8924
8925         ei->delayed_node = NULL;
8926
8927         ei->i_otime.tv_sec = 0;
8928         ei->i_otime.tv_nsec = 0;
8929
8930         inode = &ei->vfs_inode;
8931         extent_map_tree_init(&ei->extent_tree);
8932         extent_io_tree_init(&ei->io_tree, &inode->i_data);
8933         extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
8934         ei->io_tree.track_uptodate = 1;
8935         ei->io_failure_tree.track_uptodate = 1;
8936         atomic_set(&ei->sync_writers, 0);
8937         mutex_init(&ei->log_mutex);
8938         mutex_init(&ei->delalloc_mutex);
8939         btrfs_ordered_inode_tree_init(&ei->ordered_tree);
8940         INIT_LIST_HEAD(&ei->delalloc_inodes);
8941         RB_CLEAR_NODE(&ei->rb_node);
8942
8943         return inode;
8944 }
8945
8946 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
8947 void btrfs_test_destroy_inode(struct inode *inode)
8948 {
8949         btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
8950         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8951 }
8952 #endif
8953
8954 static void btrfs_i_callback(struct rcu_head *head)
8955 {
8956         struct inode *inode = container_of(head, struct inode, i_rcu);
8957         kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
8958 }
8959
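/*
 * Final teardown of an inode: warn about any accounting that should already
 * be zero, drop a leftover orphan-item count, remove any ordered extents that
 * were left behind, then free the inode via RCU.
 */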
8960 void btrfs_destroy_inode(struct inode *inode)
8961 {
8962         struct btrfs_ordered_extent *ordered;
8963         struct btrfs_root *root = BTRFS_I(inode)->root;
8964
8965         WARN_ON(!hlist_empty(&inode->i_dentry));
8966         WARN_ON(inode->i_data.nrpages);
8967         WARN_ON(BTRFS_I(inode)->outstanding_extents);
8968         WARN_ON(BTRFS_I(inode)->reserved_extents);
8969         WARN_ON(BTRFS_I(inode)->delalloc_bytes);
8970         WARN_ON(BTRFS_I(inode)->csum_bytes);
8971         WARN_ON(BTRFS_I(inode)->defrag_bytes);
8972
8973         /*
8974          * This can happen when we create an inode, but somebody else also
8975          * created the same inode and we need to destroy the one we already
8976          * created.
8977          */
8978         if (!root)
8979                 goto free;
8980
8981         if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
8982                      &BTRFS_I(inode)->runtime_flags)) {
8983                 btrfs_info(root->fs_info, "inode %llu still on the orphan list",
8984                         btrfs_ino(inode));
8985                 atomic_dec(&root->orphan_inodes);
8986         }
8987
8988         while (1) {
8989                 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
8990                 if (!ordered)
8991                         break;
8992                 else {
8993                         btrfs_err(root->fs_info, "found ordered extent %llu %llu on inode cleanup",
8994                                 ordered->file_offset, ordered->len);
8995                         btrfs_remove_ordered_extent(inode, ordered);
8996                         btrfs_put_ordered_extent(ordered);
8997                         btrfs_put_ordered_extent(ordered);
8998                 }
8999         }
9000         inode_tree_del(inode);
9001         btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
9002 free:
9003         call_rcu(&inode->i_rcu, btrfs_i_callback);
9004 }
9005
9006 int btrfs_drop_inode(struct inode *inode)
9007 {
9008         struct btrfs_root *root = BTRFS_I(inode)->root;
9009
9010         if (root == NULL)
9011                 return 1;
9012
9013         /* the snap/subvol tree is being deleted */
9014         if (btrfs_root_refs(&root->root_item) == 0)
9015                 return 1;
9016         else
9017                 return generic_drop_inode(inode);
9018 }
9019
9020 static void init_once(void *foo)
9021 {
9022         struct btrfs_inode *ei = (struct btrfs_inode *) foo;
9023
9024         inode_init_once(&ei->vfs_inode);
9025 }
9026
9027 void btrfs_destroy_cachep(void)
9028 {
9029         /*
9030          * Make sure all delayed rcu free inodes are flushed before we
9031          * destroy cache.
9032          */
9033         rcu_barrier();
9034         if (btrfs_inode_cachep)
9035                 kmem_cache_destroy(btrfs_inode_cachep);
9036         if (btrfs_trans_handle_cachep)
9037                 kmem_cache_destroy(btrfs_trans_handle_cachep);
9038         if (btrfs_transaction_cachep)
9039                 kmem_cache_destroy(btrfs_transaction_cachep);
9040         if (btrfs_path_cachep)
9041                 kmem_cache_destroy(btrfs_path_cachep);
9042         if (btrfs_free_space_cachep)
9043                 kmem_cache_destroy(btrfs_free_space_cachep);
9044         if (btrfs_delalloc_work_cachep)
9045                 kmem_cache_destroy(btrfs_delalloc_work_cachep);
9046 }
9047
9048 int btrfs_init_cachep(void)
9049 {
9050         btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
9051                         sizeof(struct btrfs_inode), 0,
9052                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
9053         if (!btrfs_inode_cachep)
9054                 goto fail;
9055
9056         btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
9057                         sizeof(struct btrfs_trans_handle), 0,
9058                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9059         if (!btrfs_trans_handle_cachep)
9060                 goto fail;
9061
9062         btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
9063                         sizeof(struct btrfs_transaction), 0,
9064                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9065         if (!btrfs_transaction_cachep)
9066                 goto fail;
9067
9068         btrfs_path_cachep = kmem_cache_create("btrfs_path",
9069                         sizeof(struct btrfs_path), 0,
9070                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9071         if (!btrfs_path_cachep)
9072                 goto fail;
9073
9074         btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
9075                         sizeof(struct btrfs_free_space), 0,
9076                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
9077         if (!btrfs_free_space_cachep)
9078                 goto fail;
9079
9080         btrfs_delalloc_work_cachep = kmem_cache_create("btrfs_delalloc_work",
9081                         sizeof(struct btrfs_delalloc_work), 0,
9082                         SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
9083                         NULL);
9084         if (!btrfs_delalloc_work_cachep)
9085                 goto fail;
9086
9087         return 0;
9088 fail:
9089         btrfs_destroy_cachep();
9090         return -ENOMEM;
9091 }
9092
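/*
 * ->getattr: in addition to the generic attributes, report st_blocks based on
 * both the on-disk byte count and any outstanding delalloc bytes, so space
 * that is only reserved so far is still visible to stat().
 */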
9093 static int btrfs_getattr(struct vfsmount *mnt,
9094                          struct dentry *dentry, struct kstat *stat)
9095 {
9096         u64 delalloc_bytes;
9097         struct inode *inode = d_inode(dentry);
9098         u32 blocksize = inode->i_sb->s_blocksize;
9099
9100         generic_fillattr(inode, stat);
9101         stat->dev = BTRFS_I(inode)->root->anon_dev;
9102         stat->blksize = PAGE_CACHE_SIZE;
9103
9104         spin_lock(&BTRFS_I(inode)->lock);
9105         delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
9106         spin_unlock(&BTRFS_I(inode)->lock);
9107         stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
9108                         ALIGN(delalloc_bytes, blocksize)) >> 9;
9109         return 0;
9110 }
9111
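/*
 * Rename old_dentry in old_dir to new_dentry in new_dir.  Renames across
 * roots are only allowed for subvolume links, and the whole operation runs
 * in a single transaction sized for the worst-case number of modified items.
 */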
9112 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
9113                            struct inode *new_dir, struct dentry *new_dentry)
9114 {
9115         struct btrfs_trans_handle *trans;
9116         struct btrfs_root *root = BTRFS_I(old_dir)->root;
9117         struct btrfs_root *dest = BTRFS_I(new_dir)->root;
9118         struct inode *new_inode = d_inode(new_dentry);
9119         struct inode *old_inode = d_inode(old_dentry);
9120         struct timespec ctime = CURRENT_TIME;
9121         u64 index = 0;
9122         u64 root_objectid;
9123         int ret;
9124         u64 old_ino = btrfs_ino(old_inode);
9125
9126         if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
9127                 return -EPERM;
9128
9129         /* we only allow renaming subvolume links between subvolumes */
9130         if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
9131                 return -EXDEV;
9132
9133         if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
9134             (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
9135                 return -ENOTEMPTY;
9136
9137         if (S_ISDIR(old_inode->i_mode) && new_inode &&
9138             new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
9139                 return -ENOTEMPTY;
9140
9141
9142         /* check for collisions, even if the name isn't there */
9143         ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino,
9144                              new_dentry->d_name.name,
9145                              new_dentry->d_name.len);
9146
9147         if (ret) {
9148                 if (ret == -EEXIST) {
9149                         /* we shouldn't get
9150                          * -EEXIST without a new_inode */
9151                         if (WARN_ON(!new_inode)) {
9152                                 return ret;
9153                         }
9154                 } else {
9155                         /* maybe -EOVERFLOW */
9156                         return ret;
9157                 }
9158         }
9159         ret = 0;
9160
9161         /*
9162          * we're using rename to replace one file with another.  Start IO on it
9163          * now so we don't add too much work to the end of the transaction
9164          */
9165         if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
9166                 filemap_flush(old_inode->i_mapping);
9167
9168         /* close the racy window with snapshot create/destroy ioctl */
9169         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9170                 down_read(&root->fs_info->subvol_sem);
9171         /*
9172          * We want to reserve the absolute worst case amount of items.  So if
9173          * both inodes are subvols and we need to unlink them then that would
9174          * require 4 item modifications, but if they are both normal inodes it
9175          * would require 5 item modifications, so we'll assume they're normal
9176          * inodes.  So 5 * 2 is 10, plus 1 for the new link, so 11 total items
9177          * should cover the worst case number of items we'll modify.
9178          */
9179         trans = btrfs_start_transaction(root, 11);
9180         if (IS_ERR(trans)) {
9181                 ret = PTR_ERR(trans);
9182                 goto out_notrans;
9183         }
9184
9185         if (dest != root)
9186                 btrfs_record_root_in_trans(trans, dest);
9187
9188         ret = btrfs_set_inode_index(new_dir, &index);
9189         if (ret)
9190                 goto out_fail;
9191
9192         BTRFS_I(old_inode)->dir_index = 0ULL;
9193         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9194                 /* force full log commit if subvolume involved. */
9195                 btrfs_set_log_full_commit(root->fs_info, trans);
9196         } else {
9197                 ret = btrfs_insert_inode_ref(trans, dest,
9198                                              new_dentry->d_name.name,
9199                                              new_dentry->d_name.len,
9200                                              old_ino,
9201                                              btrfs_ino(new_dir), index);
9202                 if (ret)
9203                         goto out_fail;
9204                 /*
9205                  * this is an ugly little race, but the rename is required
9206                  * to make sure that if we crash, the inode is either at the
9207                  * old name or the new one.  pinning the log transaction lets
9208                  * us make sure we don't allow a log commit to come in after
9209                  * we unlink the name but before we add the new name back in.
9210                  */
9211                 btrfs_pin_log_trans(root);
9212         }
9213
9214         inode_inc_iversion(old_dir);
9215         inode_inc_iversion(new_dir);
9216         inode_inc_iversion(old_inode);
9217         old_dir->i_ctime = old_dir->i_mtime = ctime;
9218         new_dir->i_ctime = new_dir->i_mtime = ctime;
9219         old_inode->i_ctime = ctime;
9220
9221         if (old_dentry->d_parent != new_dentry->d_parent)
9222                 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
9223
9224         if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
9225                 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
9226                 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
9227                                         old_dentry->d_name.name,
9228                                         old_dentry->d_name.len);
9229         } else {
9230                 ret = __btrfs_unlink_inode(trans, root, old_dir,
9231                                         d_inode(old_dentry),
9232                                         old_dentry->d_name.name,
9233                                         old_dentry->d_name.len);
9234                 if (!ret)
9235                         ret = btrfs_update_inode(trans, root, old_inode);
9236         }
9237         if (ret) {
9238                 btrfs_abort_transaction(trans, root, ret);
9239                 goto out_fail;
9240         }
9241
9242         if (new_inode) {
9243                 inode_inc_iversion(new_inode);
9244                 new_inode->i_ctime = CURRENT_TIME;
9245                 if (unlikely(btrfs_ino(new_inode) ==
9246                              BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
9247                         root_objectid = BTRFS_I(new_inode)->location.objectid;
9248                         ret = btrfs_unlink_subvol(trans, dest, new_dir,
9249                                                 root_objectid,
9250                                                 new_dentry->d_name.name,
9251                                                 new_dentry->d_name.len);
9252                         BUG_ON(new_inode->i_nlink == 0);
9253                 } else {
9254                         ret = btrfs_unlink_inode(trans, dest, new_dir,
9255                                                  d_inode(new_dentry),
9256                                                  new_dentry->d_name.name,
9257                                                  new_dentry->d_name.len);
9258                 }
9259                 if (!ret && new_inode->i_nlink == 0)
9260                         ret = btrfs_orphan_add(trans, d_inode(new_dentry));
9261                 if (ret) {
9262                         btrfs_abort_transaction(trans, root, ret);
9263                         goto out_fail;
9264                 }
9265         }
9266
9267         ret = btrfs_add_link(trans, new_dir, old_inode,
9268                              new_dentry->d_name.name,
9269                              new_dentry->d_name.len, 0, index);
9270         if (ret) {
9271                 btrfs_abort_transaction(trans, root, ret);
9272                 goto out_fail;
9273         }
9274
9275         if (old_inode->i_nlink == 1)
9276                 BTRFS_I(old_inode)->dir_index = index;
9277
9278         if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
9279                 struct dentry *parent = new_dentry->d_parent;
9280                 btrfs_log_new_name(trans, old_inode, old_dir, parent);
9281                 btrfs_end_log_trans(root);
9282         }
9283 out_fail:
9284         btrfs_end_transaction(trans, root);
9285 out_notrans:
9286         if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
9287                 up_read(&root->fs_info->subvol_sem);
9288
9289         return ret;
9290 }
9291
9292 static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
9293                          struct inode *new_dir, struct dentry *new_dentry,
9294                          unsigned int flags)
9295 {
9296         if (flags & ~RENAME_NOREPLACE)
9297                 return -EINVAL;
9298
9299         return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry);
9300 }
9301
9302 static void btrfs_run_delalloc_work(struct btrfs_work *work)
9303 {
9304         struct btrfs_delalloc_work *delalloc_work;
9305         struct inode *inode;
9306
9307         delalloc_work = container_of(work, struct btrfs_delalloc_work,
9308                                      work);
9309         inode = delalloc_work->inode;
9310         if (delalloc_work->wait) {
9311                 btrfs_wait_ordered_range(inode, 0, (u64)-1);
9312         } else {
9313                 filemap_flush(inode->i_mapping);
9314                 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
9315                              &BTRFS_I(inode)->runtime_flags))
9316                         filemap_flush(inode->i_mapping);
9317         }
9318
9319         if (delalloc_work->delay_iput)
9320                 btrfs_add_delayed_iput(inode);
9321         else
9322                 iput(inode);
9323         complete(&delalloc_work->completion);
9324 }
9325
9326 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
9327                                                     int wait, int delay_iput)
9328 {
9329         struct btrfs_delalloc_work *work;
9330
9331         work = kmem_cache_zalloc(btrfs_delalloc_work_cachep, GFP_NOFS);
9332         if (!work)
9333                 return NULL;
9334
9335         init_completion(&work->completion);
9336         INIT_LIST_HEAD(&work->list);
9337         work->inode = inode;
9338         work->wait = wait;
9339         work->delay_iput = delay_iput;
9340         WARN_ON_ONCE(!inode);
9341         btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
9342                         btrfs_run_delalloc_work, NULL, NULL);
9343
9344         return work;
9345 }
9346
9347 void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
9348 {
9349         wait_for_completion(&work->completion);
9350         kmem_cache_free(btrfs_delalloc_work_cachep, work);
9351 }
9352
9353 /*
9354  * some fairly slow code that needs optimization. This walks the list
9355  * of all the inodes with pending delalloc and forces them to disk.
9356  */
9357 static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
9358                                    int nr)
9359 {
9360         struct btrfs_inode *binode;
9361         struct inode *inode;
9362         struct btrfs_delalloc_work *work, *next;
9363         struct list_head works;
9364         struct list_head splice;
9365         int ret = 0;
9366
9367         INIT_LIST_HEAD(&works);
9368         INIT_LIST_HEAD(&splice);
9369
9370         mutex_lock(&root->delalloc_mutex);
9371         spin_lock(&root->delalloc_lock);
9372         list_splice_init(&root->delalloc_inodes, &splice);
9373         while (!list_empty(&splice)) {
9374                 binode = list_entry(splice.next, struct btrfs_inode,
9375                                     delalloc_inodes);
9376
9377                 list_move_tail(&binode->delalloc_inodes,
9378                                &root->delalloc_inodes);
9379                 inode = igrab(&binode->vfs_inode);
9380                 if (!inode) {
9381                         cond_resched_lock(&root->delalloc_lock);
9382                         continue;
9383                 }
9384                 spin_unlock(&root->delalloc_lock);
9385
9386                 work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
9387                 if (!work) {
9388                         if (delay_iput)
9389                                 btrfs_add_delayed_iput(inode);
9390                         else
9391                                 iput(inode);
9392                         ret = -ENOMEM;
9393                         goto out;
9394                 }
9395                 list_add_tail(&work->list, &works);
9396                 btrfs_queue_work(root->fs_info->flush_workers,
9397                                  &work->work);
9398                 ret++;
9399                 if (nr != -1 && ret >= nr)
9400                         goto out;
9401                 cond_resched();
9402                 spin_lock(&root->delalloc_lock);
9403         }
9404         spin_unlock(&root->delalloc_lock);
9405
9406 out:
9407         list_for_each_entry_safe(work, next, &works, list) {
9408                 list_del_init(&work->list);
9409                 btrfs_wait_and_free_delalloc_work(work);
9410         }
9411
9412         if (!list_empty_careful(&splice)) {
9413                 spin_lock(&root->delalloc_lock);
9414                 list_splice_tail(&splice, &root->delalloc_inodes);
9415                 spin_unlock(&root->delalloc_lock);
9416         }
9417         mutex_unlock(&root->delalloc_mutex);
9418         return ret;
9419 }
9420
9421 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
9422 {
9423         int ret;
9424
9425         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
9426                 return -EROFS;
9427
9428         ret = __start_delalloc_inodes(root, delay_iput, -1);
9429         if (ret > 0)
9430                 ret = 0;
9431         /*
9432          * the filemap_flush will queue IO into the worker threads, but
9433          * we have to make sure the IO is actually started and that
9434          * ordered extents get created before we return
9435          */
9436         atomic_inc(&root->fs_info->async_submit_draining);
9437         while (atomic_read(&root->fs_info->nr_async_submits) ||
9438               atomic_read(&root->fs_info->async_delalloc_pages)) {
9439                 wait_event(root->fs_info->async_submit_wait,
9440                    (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
9441                     atomic_read(&root->fs_info->async_delalloc_pages) == 0));
9442         }
9443         atomic_dec(&root->fs_info->async_submit_draining);
9444         return ret;
9445 }
9446
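/*
 * Descriptive comment (added for clarity): as btrfs_start_delalloc_inodes(),
 * but walks every root on fs_info->delalloc_roots and flushes at most @nr
 * inodes in total (@nr == -1 means no limit).  Any roots left unprocessed are
 * spliced back onto the delalloc_roots list before returning.
 */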
9447 int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
9448                                int nr)
9449 {
9450         struct btrfs_root *root;
9451         struct list_head splice;
9452         int ret;
9453
9454         if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
9455                 return -EROFS;
9456
9457         INIT_LIST_HEAD(&splice);
9458
9459         mutex_lock(&fs_info->delalloc_root_mutex);
9460         spin_lock(&fs_info->delalloc_root_lock);
9461         list_splice_init(&fs_info->delalloc_roots, &splice);
9462         while (!list_empty(&splice) && nr) {
9463                 root = list_first_entry(&splice, struct btrfs_root,
9464                                         delalloc_root);
9465                 root = btrfs_grab_fs_root(root);
9466                 BUG_ON(!root);
9467                 list_move_tail(&root->delalloc_root,
9468                                &fs_info->delalloc_roots);
9469                 spin_unlock(&fs_info->delalloc_root_lock);
9470
9471                 ret = __start_delalloc_inodes(root, delay_iput, nr);
9472                 btrfs_put_fs_root(root);
9473                 if (ret < 0)
9474                         goto out;
9475
9476                 if (nr != -1) {
9477                         nr -= ret;
9478                         WARN_ON(nr < 0);
9479                 }
9480                 spin_lock(&fs_info->delalloc_root_lock);
9481         }
9482         spin_unlock(&fs_info->delalloc_root_lock);
9483
9484         ret = 0;
9485         atomic_inc(&fs_info->async_submit_draining);
9486         while (atomic_read(&fs_info->nr_async_submits) ||
9487               atomic_read(&fs_info->async_delalloc_pages)) {
9488                 wait_event(fs_info->async_submit_wait,
9489                    (atomic_read(&fs_info->nr_async_submits) == 0 &&
9490                     atomic_read(&fs_info->async_delalloc_pages) == 0));
9491         }
9492         atomic_dec(&fs_info->async_submit_draining);
9493 out:
9494         if (!list_empty_careful(&splice)) {
9495                 spin_lock(&fs_info->delalloc_root_lock);
9496                 list_splice_tail(&splice, &fs_info->delalloc_roots);
9497                 spin_unlock(&fs_info->delalloc_root_lock);
9498         }
9499         mutex_unlock(&fs_info->delalloc_root_mutex);
9500         return ret;
9501 }
9502
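/*
 * Descriptive comment (added for clarity): create a symlink under @dir.  The
 * target string is stored as a single inline file extent, which is why its
 * length is capped at BTRFS_MAX_INLINE_DATA_SIZE().
 */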
9503 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
9504                          const char *symname)
9505 {
9506         struct btrfs_trans_handle *trans;
9507         struct btrfs_root *root = BTRFS_I(dir)->root;
9508         struct btrfs_path *path;
9509         struct btrfs_key key;
9510         struct inode *inode = NULL;
9511         int err;
9512         int drop_inode = 0;
9513         u64 objectid;
9514         u64 index = 0;
9515         int name_len;
9516         int datasize;
9517         unsigned long ptr;
9518         struct btrfs_file_extent_item *ei;
9519         struct extent_buffer *leaf;
9520
9521         name_len = strlen(symname);
9522         if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
9523                 return -ENAMETOOLONG;
9524
9525         /*
9526          * 2 items for inode item and ref
9527          * 2 items for dir items
9528          * 1 item for xattr if selinux is on
9529          */
9530         trans = btrfs_start_transaction(root, 5);
9531         if (IS_ERR(trans))
9532                 return PTR_ERR(trans);
9533
9534         err = btrfs_find_free_ino(root, &objectid);
9535         if (err)
9536                 goto out_unlock;
9537
9538         inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
9539                                 dentry->d_name.len, btrfs_ino(dir), objectid,
9540                                 S_IFLNK|S_IRWXUGO, &index);
9541         if (IS_ERR(inode)) {
9542                 err = PTR_ERR(inode);
9543                 goto out_unlock;
9544         }
9545
9546         /*
9548          * If the active LSM wants to access the inode during
9549          * d_instantiate it needs these. Smack checks to see
9550          * if the filesystem supports xattrs by looking at the
9551          * ops vector.
9552          */
9552         inode->i_fop = &btrfs_file_operations;
9553         inode->i_op = &btrfs_file_inode_operations;
9554         inode->i_mapping->a_ops = &btrfs_aops;
9555         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
9556
9557         err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
9558         if (err)
9559                 goto out_unlock_inode;
9560
9561         err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
9562         if (err)
9563                 goto out_unlock_inode;
9564
9565         path = btrfs_alloc_path();
9566         if (!path) {
9567                 err = -ENOMEM;
9568                 goto out_unlock_inode;
9569         }
9570         key.objectid = btrfs_ino(inode);
9571         key.offset = 0;
9572         key.type = BTRFS_EXTENT_DATA_KEY;
9573         datasize = btrfs_file_extent_calc_inline_size(name_len);
9574         err = btrfs_insert_empty_item(trans, root, path, &key,
9575                                       datasize);
9576         if (err) {
9577                 btrfs_free_path(path);
9578                 goto out_unlock_inode;
9579         }
9580         leaf = path->nodes[0];
9581         ei = btrfs_item_ptr(leaf, path->slots[0],
9582                             struct btrfs_file_extent_item);
9583         btrfs_set_file_extent_generation(leaf, ei, trans->transid);
9584         btrfs_set_file_extent_type(leaf, ei,
9585                                    BTRFS_FILE_EXTENT_INLINE);
9586         btrfs_set_file_extent_encryption(leaf, ei, 0);
9587         btrfs_set_file_extent_compression(leaf, ei, 0);
9588         btrfs_set_file_extent_other_encoding(leaf, ei, 0);
9589         btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
9590
9591         ptr = btrfs_file_extent_inline_start(ei);
9592         write_extent_buffer(leaf, symname, ptr, name_len);
9593         btrfs_mark_buffer_dirty(leaf);
9594         btrfs_free_path(path);
9595
9596         inode->i_op = &btrfs_symlink_inode_operations;
9597         inode->i_mapping->a_ops = &btrfs_symlink_aops;
9598         inode_set_bytes(inode, name_len);
9599         btrfs_i_size_write(inode, name_len);
9600         err = btrfs_update_inode(trans, root, inode);
9601         if (err) {
9602                 drop_inode = 1;
9603                 goto out_unlock_inode;
9604         }
9605
9606         unlock_new_inode(inode);
9607         d_instantiate(dentry, inode);
9608
9609 out_unlock:
9610         btrfs_end_transaction(trans, root);
9611         if (drop_inode) {
9612                 inode_dec_link_count(inode);
9613                 iput(inode);
9614         }
9615         btrfs_btree_balance_dirty(root);
9616         return err;
9617
9618 out_unlock_inode:
9619         drop_inode = 1;
9620         unlock_new_inode(inode);
9621         goto out_unlock;
9622 }
9623
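/*
 * Descriptive comment (added for clarity): back the range
 * [@start, @start + @num_bytes) with PREALLOC file extents.  Extents are
 * reserved in chunks of at most 256MB but no smaller than @min_size, cached
 * in the extent map tree, and i_size is advanced as needed unless
 * FALLOC_FL_KEEP_SIZE was requested.  When @trans is NULL a transaction is
 * started and ended around each chunk.
 */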
9624 static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
9625                                        u64 start, u64 num_bytes, u64 min_size,
9626                                        loff_t actual_len, u64 *alloc_hint,
9627                                        struct btrfs_trans_handle *trans)
9628 {
9629         struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
9630         struct extent_map *em;
9631         struct btrfs_root *root = BTRFS_I(inode)->root;
9632         struct btrfs_key ins;
9633         u64 cur_offset = start;
9634         u64 i_size;
9635         u64 cur_bytes;
9636         int ret = 0;
9637         bool own_trans = true;
9638
9639         if (trans)
9640                 own_trans = false;
9641         while (num_bytes > 0) {
9642                 if (own_trans) {
9643                         trans = btrfs_start_transaction(root, 3);
9644                         if (IS_ERR(trans)) {
9645                                 ret = PTR_ERR(trans);
9646                                 break;
9647                         }
9648                 }
9649
9650                 cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
9651                 cur_bytes = max(cur_bytes, min_size);
9652                 ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
9653                                            *alloc_hint, &ins, 1, 0);
9654                 if (ret) {
9655                         if (own_trans)
9656                                 btrfs_end_transaction(trans, root);
9657                         break;
9658                 }
9659
9660                 ret = insert_reserved_file_extent(trans, inode,
9661                                                   cur_offset, ins.objectid,
9662                                                   ins.offset, ins.offset,
9663                                                   ins.offset, 0, 0, 0,
9664                                                   BTRFS_FILE_EXTENT_PREALLOC);
9665                 if (ret) {
9666                         btrfs_free_reserved_extent(root, ins.objectid,
9667                                                    ins.offset, 0);
9668                         btrfs_abort_transaction(trans, root, ret);
9669                         if (own_trans)
9670                                 btrfs_end_transaction(trans, root);
9671                         break;
9672                 }
9673
9674                 btrfs_drop_extent_cache(inode, cur_offset,
9676                                         cur_offset + ins.offset - 1, 0);
9676
9677                 em = alloc_extent_map();
9678                 if (!em) {
9679                         set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
9680                                 &BTRFS_I(inode)->runtime_flags);
9681                         goto next;
9682                 }
9683
9684                 em->start = cur_offset;
9685                 em->orig_start = cur_offset;
9686                 em->len = ins.offset;
9687                 em->block_start = ins.objectid;
9688                 em->block_len = ins.offset;
9689                 em->orig_block_len = ins.offset;
9690                 em->ram_bytes = ins.offset;
9691                 em->bdev = root->fs_info->fs_devices->latest_bdev;
9692                 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
9693                 em->generation = trans->transid;
9694
9695                 while (1) {
9696                         write_lock(&em_tree->lock);
9697                         ret = add_extent_mapping(em_tree, em, 1);
9698                         write_unlock(&em_tree->lock);
9699                         if (ret != -EEXIST)
9700                                 break;
9701                         btrfs_drop_extent_cache(inode, cur_offset,
9702                                                 cur_offset + ins.offset - 1,
9703                                                 0);
9704                 }
9705                 free_extent_map(em);
9706 next:
9707                 num_bytes -= ins.offset;
9708                 cur_offset += ins.offset;
9709                 *alloc_hint = ins.objectid + ins.offset;
9710
9711                 inode_inc_iversion(inode);
9712                 inode->i_ctime = CURRENT_TIME;
9713                 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
9714                 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
9715                     (actual_len > inode->i_size) &&
9716                     (cur_offset > inode->i_size)) {
9717                         if (cur_offset > actual_len)
9718                                 i_size = actual_len;
9719                         else
9720                                 i_size = cur_offset;
9721                         i_size_write(inode, i_size);
9722                         btrfs_ordered_update_i_size(inode, i_size, NULL);
9723                 }
9724
9725                 ret = btrfs_update_inode(trans, root, inode);
9726
9727                 if (ret) {
9728                         btrfs_abort_transaction(trans, root, ret);
9729                         if (own_trans)
9730                                 btrfs_end_transaction(trans, root);
9731                         break;
9732                 }
9733
9734                 if (own_trans)
9735                         btrfs_end_transaction(trans, root);
9736         }
9737         return ret;
9738 }
9739
9740 int btrfs_prealloc_file_range(struct inode *inode, int mode,
9741                               u64 start, u64 num_bytes, u64 min_size,
9742                               loff_t actual_len, u64 *alloc_hint)
9743 {
9744         return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9745                                            min_size, actual_len, alloc_hint,
9746                                            NULL);
9747 }
9748
9749 int btrfs_prealloc_file_range_trans(struct inode *inode,
9750                                     struct btrfs_trans_handle *trans, int mode,
9751                                     u64 start, u64 num_bytes, u64 min_size,
9752                                     loff_t actual_len, u64 *alloc_hint)
9753 {
9754         return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
9755                                            min_size, actual_len, alloc_hint, trans);
9756 }
9757
9758 static int btrfs_set_page_dirty(struct page *page)
9759 {
9760         return __set_page_dirty_nobuffers(page);
9761 }
9762
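/*
 * Descriptive comment (added for clarity): write access to regular files,
 * directories and symlinks is denied when the subvolume is read-only or the
 * inode carries BTRFS_INODE_READONLY; everything else falls through to
 * generic_permission().
 */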
9763 static int btrfs_permission(struct inode *inode, int mask)
9764 {
9765         struct btrfs_root *root = BTRFS_I(inode)->root;
9766         umode_t mode = inode->i_mode;
9767
9768         if (mask & MAY_WRITE &&
9769             (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
9770                 if (btrfs_root_readonly(root))
9771                         return -EROFS;
9772                 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
9773                         return -EACCES;
9774         }
9775         return generic_permission(inode, mask);
9776 }
9777
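/*
 * Descriptive comment (added for clarity): implements O_TMPFILE.  Create an
 * unnamed inode and put it on the orphan list so it is cleaned up after a
 * crash, then hand it to d_tmpfile().
 */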
9778 static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
9779 {
9780         struct btrfs_trans_handle *trans;
9781         struct btrfs_root *root = BTRFS_I(dir)->root;
9782         struct inode *inode = NULL;
9783         u64 objectid;
9784         u64 index;
9785         int ret = 0;
9786
9787         /*
9788          * 5 units required for adding orphan entry
9789          */
9790         trans = btrfs_start_transaction(root, 5);
9791         if (IS_ERR(trans))
9792                 return PTR_ERR(trans);
9793
9794         ret = btrfs_find_free_ino(root, &objectid);
9795         if (ret)
9796                 goto out;
9797
9798         inode = btrfs_new_inode(trans, root, dir, NULL, 0,
9799                                 btrfs_ino(dir), objectid, mode, &index);
9800         if (IS_ERR(inode)) {
9801                 ret = PTR_ERR(inode);
9802                 inode = NULL;
9803                 goto out;
9804         }
9805
9806         inode->i_fop = &btrfs_file_operations;
9807         inode->i_op = &btrfs_file_inode_operations;
9808
9809         inode->i_mapping->a_ops = &btrfs_aops;
9810         BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
9811
9812         ret = btrfs_init_inode_security(trans, inode, dir, NULL);
9813         if (ret)
9814                 goto out_inode;
9815
9816         ret = btrfs_update_inode(trans, root, inode);
9817         if (ret)
9818                 goto out_inode;
9819         ret = btrfs_orphan_add(trans, inode);
9820         if (ret)
9821                 goto out_inode;
9822
9823         /*
9824          * We set number of links to 0 in btrfs_new_inode(), and here we set
9825          * it to 1 because d_tmpfile() will issue a warning if the count is 0,
9826          * through:
9827          *
9828          *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
9829          */
9830         set_nlink(inode, 1);
9831         unlock_new_inode(inode);
9832         d_tmpfile(dentry, inode);
9833         mark_inode_dirty(inode);
9834
9835 out:
9836         btrfs_end_transaction(trans, root);
9837         if (ret)
9838                 iput(inode);
9839         btrfs_balance_delayed_items(root);
9840         btrfs_btree_balance_dirty(root);
9841         return ret;
9842
9843 out_inode:
9844         unlock_new_inode(inode);
9845         goto out;
9846
9847 }
9848
9849 /* Inspired by filemap_check_errors() */
9850 int btrfs_inode_check_errors(struct inode *inode)
9851 {
9852         int ret = 0;
9853
9854         if (test_bit(AS_ENOSPC, &inode->i_mapping->flags) &&
9855             test_and_clear_bit(AS_ENOSPC, &inode->i_mapping->flags))
9856                 ret = -ENOSPC;
9857         if (test_bit(AS_EIO, &inode->i_mapping->flags) &&
9858             test_and_clear_bit(AS_EIO, &inode->i_mapping->flags))
9859                 ret = -EIO;
9860
9861         return ret;
9862 }
9863
9864 static const struct inode_operations btrfs_dir_inode_operations = {
9865         .getattr        = btrfs_getattr,
9866         .lookup         = btrfs_lookup,
9867         .create         = btrfs_create,
9868         .unlink         = btrfs_unlink,
9869         .link           = btrfs_link,
9870         .mkdir          = btrfs_mkdir,
9871         .rmdir          = btrfs_rmdir,
9872         .rename2        = btrfs_rename2,
9873         .symlink        = btrfs_symlink,
9874         .setattr        = btrfs_setattr,
9875         .mknod          = btrfs_mknod,
9876         .setxattr       = btrfs_setxattr,
9877         .getxattr       = btrfs_getxattr,
9878         .listxattr      = btrfs_listxattr,
9879         .removexattr    = btrfs_removexattr,
9880         .permission     = btrfs_permission,
9881         .get_acl        = btrfs_get_acl,
9882         .set_acl        = btrfs_set_acl,
9883         .update_time    = btrfs_update_time,
9884         .tmpfile        = btrfs_tmpfile,
9885 };
9886 static const struct inode_operations btrfs_dir_ro_inode_operations = {
9887         .lookup         = btrfs_lookup,
9888         .permission     = btrfs_permission,
9889         .get_acl        = btrfs_get_acl,
9890         .set_acl        = btrfs_set_acl,
9891         .update_time    = btrfs_update_time,
9892 };
9893
9894 static const struct file_operations btrfs_dir_file_operations = {
9895         .llseek         = generic_file_llseek,
9896         .read           = generic_read_dir,
9897         .iterate        = btrfs_real_readdir,
9898         .unlocked_ioctl = btrfs_ioctl,
9899 #ifdef CONFIG_COMPAT
9900         .compat_ioctl   = btrfs_ioctl,
9901 #endif
9902         .release        = btrfs_release_file,
9903         .fsync          = btrfs_sync_file,
9904 };
9905
9906 static struct extent_io_ops btrfs_extent_io_ops = {
9907         .fill_delalloc = run_delalloc_range,
9908         .submit_bio_hook = btrfs_submit_bio_hook,
9909         .merge_bio_hook = btrfs_merge_bio_hook,
9910         .readpage_end_io_hook = btrfs_readpage_end_io_hook,
9911         .writepage_end_io_hook = btrfs_writepage_end_io_hook,
9912         .writepage_start_hook = btrfs_writepage_start_hook,
9913         .set_bit_hook = btrfs_set_bit_hook,
9914         .clear_bit_hook = btrfs_clear_bit_hook,
9915         .merge_extent_hook = btrfs_merge_extent_hook,
9916         .split_extent_hook = btrfs_split_extent_hook,
9917 };
9918
9919 /*
9920  * btrfs doesn't support the bmap operation because swapfiles
9921  * use bmap to make a mapping of extents in the file.  They assume
9922  * these extents won't change over the life of the file and they
9923  * use the bmap result to do IO directly to the drive.
9924  *
9925  * the btrfs bmap call would return logical addresses that aren't
9926  * suitable for IO and they also will change frequently as COW
9927  * operations happen.  So, swapfile + btrfs == corruption.
9928  *
9929  * For now we're avoiding this by dropping bmap.
9930  */
9931 static const struct address_space_operations btrfs_aops = {
9932         .readpage       = btrfs_readpage,
9933         .writepage      = btrfs_writepage,
9934         .writepages     = btrfs_writepages,
9935         .readpages      = btrfs_readpages,
9936         .direct_IO      = btrfs_direct_IO,
9937         .invalidatepage = btrfs_invalidatepage,
9938         .releasepage    = btrfs_releasepage,
9939         .set_page_dirty = btrfs_set_page_dirty,
9940         .error_remove_page = generic_error_remove_page,
9941 };
9942
9943 static const struct address_space_operations btrfs_symlink_aops = {
9944         .readpage       = btrfs_readpage,
9945         .writepage      = btrfs_writepage,
9946         .invalidatepage = btrfs_invalidatepage,
9947         .releasepage    = btrfs_releasepage,
9948 };
9949
9950 static const struct inode_operations btrfs_file_inode_operations = {
9951         .getattr        = btrfs_getattr,
9952         .setattr        = btrfs_setattr,
9953         .setxattr       = btrfs_setxattr,
9954         .getxattr       = btrfs_getxattr,
9955         .listxattr      = btrfs_listxattr,
9956         .removexattr    = btrfs_removexattr,
9957         .permission     = btrfs_permission,
9958         .fiemap         = btrfs_fiemap,
9959         .get_acl        = btrfs_get_acl,
9960         .set_acl        = btrfs_set_acl,
9961         .update_time    = btrfs_update_time,
9962 };
9963 static const struct inode_operations btrfs_special_inode_operations = {
9964         .getattr        = btrfs_getattr,
9965         .setattr        = btrfs_setattr,
9966         .permission     = btrfs_permission,
9967         .setxattr       = btrfs_setxattr,
9968         .getxattr       = btrfs_getxattr,
9969         .listxattr      = btrfs_listxattr,
9970         .removexattr    = btrfs_removexattr,
9971         .get_acl        = btrfs_get_acl,
9972         .set_acl        = btrfs_set_acl,
9973         .update_time    = btrfs_update_time,
9974 };
9975 static const struct inode_operations btrfs_symlink_inode_operations = {
9976         .readlink       = generic_readlink,
9977         .follow_link    = page_follow_link_light,
9978         .put_link       = page_put_link,
9979         .getattr        = btrfs_getattr,
9980         .setattr        = btrfs_setattr,
9981         .permission     = btrfs_permission,
9982         .setxattr       = btrfs_setxattr,
9983         .getxattr       = btrfs_getxattr,
9984         .listxattr      = btrfs_listxattr,
9985         .removexattr    = btrfs_removexattr,
9986         .update_time    = btrfs_update_time,
9987 };
9988
9989 const struct dentry_operations btrfs_dentry_operations = {
9990         .d_delete       = btrfs_dentry_delete,
9991         .d_release      = btrfs_dentry_release,
9992 };