2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
23 #include "transaction.h"
24 #include "print-tree.h"
27 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
28 *root, struct btrfs_path *path, int level);
29 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
30 *root, struct btrfs_key *ins_key,
31 struct btrfs_path *path, int data_size, int extend);
32 static int push_node_left(struct btrfs_trans_handle *trans,
33 struct btrfs_root *root, struct extent_buffer *dst,
34 struct extent_buffer *src, int empty);
35 static int balance_node_right(struct btrfs_trans_handle *trans,
36 struct btrfs_root *root,
37 struct extent_buffer *dst_buf,
38 struct extent_buffer *src_buf);
39 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
40 struct btrfs_path *path, int level, int slot);
42 struct btrfs_path *btrfs_alloc_path(void)
44 struct btrfs_path *path;
45 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
50 * set all locked nodes in the path to blocking locks. This should
51 * be done before scheduling
53 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
56 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
57 if (!p->nodes[i] || !p->locks[i])
59 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
60 if (p->locks[i] == BTRFS_READ_LOCK)
61 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
62 else if (p->locks[i] == BTRFS_WRITE_LOCK)
63 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
68 * reset all the locked nodes in the path to spinning locks.
70 * held is used to keep lockdep happy, when lockdep is enabled
71 * we set held to a blocking lock before we go around and
72 * retake all the spinlocks in the path. You can safely use NULL
75 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
76 struct extent_buffer *held, int held_rw)
80 #ifdef CONFIG_DEBUG_LOCK_ALLOC
81 /* lockdep really cares that we take all of these spinlocks
82 * in the right order. If any of the locks in the path are not
83 * currently blocking, it is going to complain. So, make really
84 * really sure by forcing the path to blocking before we clear
88 btrfs_set_lock_blocking_rw(held, held_rw);
89 if (held_rw == BTRFS_WRITE_LOCK)
90 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
91 else if (held_rw == BTRFS_READ_LOCK)
92 held_rw = BTRFS_READ_LOCK_BLOCKING;
94 btrfs_set_path_blocking(p);
97 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
98 if (p->nodes[i] && p->locks[i]) {
99 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
100 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
101 p->locks[i] = BTRFS_WRITE_LOCK;
102 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
103 p->locks[i] = BTRFS_READ_LOCK;
107 #ifdef CONFIG_DEBUG_LOCK_ALLOC
109 btrfs_clear_lock_blocking_rw(held, held_rw);
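
/*
 * Illustrative sketch, not part of the original file: the usual pattern for
 * the two helpers above.  Spinning locks cannot be held across anything that
 * might sleep, so callers flip the whole path to blocking locks first, do the
 * work that may schedule, and then flip the locks back to spinning.
 */
static void __maybe_unused example_blocking_section(struct btrfs_path *p)
{
	btrfs_set_path_blocking(p);
	/* ... memory allocation, I/O, or anything else that may sleep ... */
	btrfs_clear_path_blocking(p, NULL, 0);
}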
113 /* this also releases the path */
114 void btrfs_free_path(struct btrfs_path *p)
118 btrfs_release_path(p);
119 kmem_cache_free(btrfs_path_cachep, p);
123 * path release drops references on the extent buffers in the path
124 * and it drops any locks held by this path
126 * It is safe to call this on paths that have no locks or extent buffers held.
128 noinline void btrfs_release_path(struct btrfs_path *p)
132 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
137 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
140 free_extent_buffer(p->nodes[i]);
146 * safely gets a reference on the root node of a tree. A lock
147 * is not taken, so a concurrent writer may put a different node
148 * at the root of the tree. See btrfs_lock_root_node for the looping version of this.
151 * The extent buffer returned by this has a reference taken, so
152 * it won't disappear. It may stop being the root of the tree
153 * at any time because there are no locks held.
155 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
157 struct extent_buffer *eb;
161 eb = rcu_dereference(root->node);
164 * RCU really hurts here, we could free up the root node because
165 * it was COWed but we may not get the new root node yet, so do
166 * the inc_not_zero dance, and if it doesn't work then
167 * synchronize_rcu and try again.
169 if (atomic_inc_not_zero(&eb->refs)) {
179 /* loop around taking references on and locking the root node of the
180 * tree until you end up with a lock on the root. A locked buffer
181 * is returned, with a reference held.
183 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
185 struct extent_buffer *eb;
188 eb = btrfs_root_node(root);
190 if (eb == root->node)
192 btrfs_tree_unlock(eb);
193 free_extent_buffer(eb);
198 /* loop around taking references on and locking the root node of the
199 * tree until you end up with a lock on the root. A locked buffer
200 * is returned, with a reference held.
202 struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
204 struct extent_buffer *eb;
207 eb = btrfs_root_node(root);
208 btrfs_tree_read_lock(eb);
209 if (eb == root->node)
211 btrfs_tree_read_unlock(eb);
212 free_extent_buffer(eb);
217 /* cowonly roots (everything not a reference counted cow subvolume) just get
218 * put onto a simple dirty list. transaction.c walks this to make sure they
219 * get properly updated on disk.
221 static void add_root_to_dirty_list(struct btrfs_root *root)
223 spin_lock(&root->fs_info->trans_lock);
224 if (root->track_dirty && list_empty(&root->dirty_list)) {
225 list_add(&root->dirty_list,
226 &root->fs_info->dirty_cowonly_roots);
228 spin_unlock(&root->fs_info->trans_lock);
232 * used by snapshot creation to make a copy of a root for a tree with
233 * a given objectid. The buffer with the new root node is returned in
234 * cow_ret, and this func returns zero on success or a negative error code.
236 int btrfs_copy_root(struct btrfs_trans_handle *trans,
237 struct btrfs_root *root,
238 struct extent_buffer *buf,
239 struct extent_buffer **cow_ret, u64 new_root_objectid)
241 struct extent_buffer *cow;
244 struct btrfs_disk_key disk_key;
246 WARN_ON(root->ref_cows && trans->transid !=
247 root->fs_info->running_transaction->transid);
248 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
250 level = btrfs_header_level(buf);
252 btrfs_item_key(buf, &disk_key, 0);
254 btrfs_node_key(buf, &disk_key, 0);
256 cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
257 new_root_objectid, &disk_key, level,
262 copy_extent_buffer(cow, buf, 0, 0, cow->len);
263 btrfs_set_header_bytenr(cow, cow->start);
264 btrfs_set_header_generation(cow, trans->transid);
265 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
266 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
267 BTRFS_HEADER_FLAG_RELOC);
268 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
269 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
271 btrfs_set_header_owner(cow, new_root_objectid);
273 write_extent_buffer(cow, root->fs_info->fsid,
274 (unsigned long)btrfs_header_fsid(cow),
277 WARN_ON(btrfs_header_generation(buf) > trans->transid);
278 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
279 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
281 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
286 btrfs_mark_buffer_dirty(cow);
292 * check if the tree block can be shared by multiple trees
294 int btrfs_block_can_be_shared(struct btrfs_root *root,
295 struct extent_buffer *buf)
298 * Tree blocks not in reference counted trees and tree roots
299 * are never shared. If a block was allocated after the last
300 * snapshot and the block was not allocated by tree relocation,
301 * we know the block is not shared.
303 if (root->ref_cows &&
304 buf != root->node && buf != root->commit_root &&
305 (btrfs_header_generation(buf) <=
306 btrfs_root_last_snapshot(&root->root_item) ||
307 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
309 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
310 if (root->ref_cows &&
311 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
317 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
318 struct btrfs_root *root,
319 struct extent_buffer *buf,
320 struct extent_buffer *cow,
330 * Backrefs update rules:
332 * Always use full backrefs for extent pointers in tree block
333 * allocated by tree relocation.
335 * If a shared tree block is no longer referenced by its owner
336 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
337 * use full backrefs for extent pointers in tree block.
339 * If a tree block is being relocated
340 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
341 * use full backrefs for extent pointers in tree block.
342 * The reason for this is some operations (such as drop tree)
343 * are only allowed for blocks that use full backrefs.
346 if (btrfs_block_can_be_shared(root, buf)) {
347 ret = btrfs_lookup_extent_info(trans, root, buf->start,
348 buf->len, &refs, &flags);
353 btrfs_std_error(root->fs_info, ret);
358 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
359 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
360 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
365 owner = btrfs_header_owner(buf);
366 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
367 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
370 if ((owner == root->root_key.objectid ||
371 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
372 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
373 ret = btrfs_inc_ref(trans, root, buf, 1, 1);
374 BUG_ON(ret); /* -ENOMEM */
376 if (root->root_key.objectid ==
377 BTRFS_TREE_RELOC_OBJECTID) {
378 ret = btrfs_dec_ref(trans, root, buf, 0, 1);
379 BUG_ON(ret); /* -ENOMEM */
380 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
381 BUG_ON(ret); /* -ENOMEM */
383 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
386 if (root->root_key.objectid ==
387 BTRFS_TREE_RELOC_OBJECTID)
388 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
390 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
391 BUG_ON(ret); /* -ENOMEM */
393 if (new_flags != 0) {
394 ret = btrfs_set_disk_extent_flags(trans, root,
402 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
403 if (root->root_key.objectid ==
404 BTRFS_TREE_RELOC_OBJECTID)
405 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
407 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
408 BUG_ON(ret); /* -ENOMEM */
409 ret = btrfs_dec_ref(trans, root, buf, 1, 1);
410 BUG_ON(ret); /* -ENOMEM */
412 clean_tree_block(trans, root, buf);
419 * does the dirty work in cow of a single block. The parent block (if
420 * supplied) is updated to point to the new cow copy. The new buffer is marked
421 * dirty and returned locked. If you modify the block it needs to be marked dirty again.
424 * search_start -- an allocation hint for the new block
426 * empty_size -- a hint that you plan on doing more cow. This is the size in
427 * bytes the allocator should try to find free next to the block it returns.
428 * This is just a hint and may be ignored by the allocator.
430 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
431 struct btrfs_root *root,
432 struct extent_buffer *buf,
433 struct extent_buffer *parent, int parent_slot,
434 struct extent_buffer **cow_ret,
435 u64 search_start, u64 empty_size)
437 struct btrfs_disk_key disk_key;
438 struct extent_buffer *cow;
447 btrfs_assert_tree_locked(buf);
449 WARN_ON(root->ref_cows && trans->transid !=
450 root->fs_info->running_transaction->transid);
451 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
453 level = btrfs_header_level(buf);
456 btrfs_item_key(buf, &disk_key, 0);
458 btrfs_node_key(buf, &disk_key, 0);
460 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
462 parent_start = parent->start;
468 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
469 root->root_key.objectid, &disk_key,
470 level, search_start, empty_size, 1);
474 /* cow is set to blocking by btrfs_init_new_buffer */
476 copy_extent_buffer(cow, buf, 0, 0, cow->len);
477 btrfs_set_header_bytenr(cow, cow->start);
478 btrfs_set_header_generation(cow, trans->transid);
479 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
480 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
481 BTRFS_HEADER_FLAG_RELOC);
482 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
483 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
485 btrfs_set_header_owner(cow, root->root_key.objectid);
487 write_extent_buffer(cow, root->fs_info->fsid,
488 (unsigned long)btrfs_header_fsid(cow),
491 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
493 btrfs_abort_transaction(trans, root, ret);
498 btrfs_reloc_cow_block(trans, root, buf, cow);
500 if (buf == root->node) {
501 WARN_ON(parent && parent != buf);
502 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
503 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
504 parent_start = buf->start;
508 extent_buffer_get(cow);
509 rcu_assign_pointer(root->node, cow);
511 btrfs_free_tree_block(trans, root, buf, parent_start,
513 free_extent_buffer(buf);
514 add_root_to_dirty_list(root);
516 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
517 parent_start = parent->start;
521 WARN_ON(trans->transid != btrfs_header_generation(parent));
522 btrfs_set_node_blockptr(parent, parent_slot,
524 btrfs_set_node_ptr_generation(parent, parent_slot,
526 btrfs_mark_buffer_dirty(parent);
527 btrfs_free_tree_block(trans, root, buf, parent_start,
531 btrfs_tree_unlock(buf);
532 free_extent_buffer_stale(buf);
533 btrfs_mark_buffer_dirty(cow);
538 static inline int should_cow_block(struct btrfs_trans_handle *trans,
539 struct btrfs_root *root,
540 struct extent_buffer *buf)
542 /* ensure we can see the force_cow */
546 * We do not need to cow a block if
547 * 1) this block is not created or changed in this transaction;
548 * 2) this block does not belong to TREE_RELOC tree;
549 * 3) the root is not forced COW.
551 * What is forced COW:
552 * when we create a snapshot during committing the transaction,
553 * after we've finished copying the src root, we must COW the shared
554 * block to ensure metadata consistency.
556 if (btrfs_header_generation(buf) == trans->transid &&
557 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
558 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
559 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
566 * cows a single block, see __btrfs_cow_block for the real work.
567 * This version of it has extra checks so that a block isn't cow'd more than
568 * once per transaction, as long as it hasn't been written yet
570 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
571 struct btrfs_root *root, struct extent_buffer *buf,
572 struct extent_buffer *parent, int parent_slot,
573 struct extent_buffer **cow_ret)
578 if (trans->transaction != root->fs_info->running_transaction) {
579 printk(KERN_CRIT "trans %llu running %llu\n",
580 (unsigned long long)trans->transid,
582 root->fs_info->running_transaction->transid);
585 if (trans->transid != root->fs_info->generation) {
586 printk(KERN_CRIT "trans %llu running %llu\n",
587 (unsigned long long)trans->transid,
588 (unsigned long long)root->fs_info->generation);
592 if (!should_cow_block(trans, root, buf)) {
597 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
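/*
 * Added note, not in the original file: the mask above rounds buf->start
 * down to a 1GiB boundary, so the allocation hint steers the COW copy
 * toward the same 1GiB-aligned region the old block lives in.
 */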
600 btrfs_set_lock_blocking(parent);
601 btrfs_set_lock_blocking(buf);
603 ret = __btrfs_cow_block(trans, root, buf, parent,
604 parent_slot, cow_ret, search_start, 0);
606 trace_btrfs_cow_block(root, buf, *cow_ret);
612 * helper function for defrag to decide if two blocks pointed to by a
613 * node are actually close by
615 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
617 if (blocknr < other && other - (blocknr + blocksize) < 32768)
619 if (blocknr > other && blocknr - (other + blocksize) < 32768)
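
/*
 * Added note, not in the original file: with the 32768-byte threshold used
 * above, two blocks count as "close" when they sit within 32K of each other
 * on disk, in either direction.
 */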
625 * compare two keys in a memcmp fashion
627 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
631 btrfs_disk_key_to_cpu(&k1, disk);
633 return btrfs_comp_cpu_keys(&k1, k2);
637 * same as comp_keys only with two btrfs_key's
639 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
641 if (k1->objectid > k2->objectid)
643 if (k1->objectid < k2->objectid)
645 if (k1->type > k2->type)
647 if (k1->type < k2->type)
649 if (k1->offset > k2->offset)
651 if (k1->offset < k2->offset)
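
/*
 * Illustrative sketch, not part of the original file: keys order first by
 * objectid, then by type, then by offset, just like a memcmp() over the
 * three fields in that order.  BTRFS_INODE_ITEM_KEY (1) is numerically
 * smaller than BTRFS_DIR_ITEM_KEY (84), so for one objectid the inode item
 * sorts ahead of its directory items.
 */
static void __maybe_unused example_key_order(void)
{
	struct btrfs_key a = { .objectid = 256, .type = BTRFS_INODE_ITEM_KEY,
			       .offset = 0 };
	struct btrfs_key b = { .objectid = 256, .type = BTRFS_DIR_ITEM_KEY,
			       .offset = 0 };

	/* same objectid, smaller type: 'a' sorts before 'b' */
	WARN_ON(btrfs_comp_cpu_keys(&a, &b) >= 0);
}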
657 * this is used by the defrag code to go through all the
658 * leaves pointed to by a node and reallocate them so that
659 * disk order is close to key order
661 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
662 struct btrfs_root *root, struct extent_buffer *parent,
663 int start_slot, int cache_only, u64 *last_ret,
664 struct btrfs_key *progress)
666 struct extent_buffer *cur;
669 u64 search_start = *last_ret;
679 int progress_passed = 0;
680 struct btrfs_disk_key disk_key;
682 parent_level = btrfs_header_level(parent);
683 if (cache_only && parent_level != 1)
686 if (trans->transaction != root->fs_info->running_transaction)
688 if (trans->transid != root->fs_info->generation)
691 parent_nritems = btrfs_header_nritems(parent);
692 blocksize = btrfs_level_size(root, parent_level - 1);
693 end_slot = parent_nritems;
695 if (parent_nritems == 1)
698 btrfs_set_lock_blocking(parent);
700 for (i = start_slot; i < end_slot; i++) {
703 btrfs_node_key(parent, &disk_key, i);
704 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
708 blocknr = btrfs_node_blockptr(parent, i);
709 gen = btrfs_node_ptr_generation(parent, i);
711 last_block = blocknr;
714 other = btrfs_node_blockptr(parent, i - 1);
715 close = close_blocks(blocknr, other, blocksize);
717 if (!close && i < end_slot - 2) {
718 other = btrfs_node_blockptr(parent, i + 1);
719 close = close_blocks(blocknr, other, blocksize);
722 last_block = blocknr;
726 cur = btrfs_find_tree_block(root, blocknr, blocksize);
728 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
731 if (!cur || !uptodate) {
733 free_extent_buffer(cur);
737 cur = read_tree_block(root, blocknr,
741 } else if (!uptodate) {
742 err = btrfs_read_buffer(cur, gen);
744 free_extent_buffer(cur);
749 if (search_start == 0)
750 search_start = last_block;
752 btrfs_tree_lock(cur);
753 btrfs_set_lock_blocking(cur);
754 err = __btrfs_cow_block(trans, root, cur, parent, i,
757 (end_slot - i) * blocksize));
759 btrfs_tree_unlock(cur);
760 free_extent_buffer(cur);
763 search_start = cur->start;
764 last_block = cur->start;
765 *last_ret = search_start;
766 btrfs_tree_unlock(cur);
767 free_extent_buffer(cur);
773 * The leaf data grows from end-to-front in the node.
774 * This returns the address of the start of the last item,
775 * which is where the leaf data stack stops.
777 static inline unsigned int leaf_data_end(struct btrfs_root *root,
778 struct extent_buffer *leaf)
780 u32 nr = btrfs_header_nritems(leaf);
782 return BTRFS_LEAF_DATA_SIZE(root);
783 return btrfs_item_offset_nr(leaf, nr - 1);
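
/*
 * Added illustration, not in the original file: item headers grow forward
 * from the start of the leaf data area while item data grows backward from
 * the end, so the value returned above is the low edge of the used data:
 *
 *   [leaf header][item 0][item 1]..[item n-1]..free..[data n-1]..[data 1][data 0]
 *                                                    ^
 *                                              leaf_data_end()
 */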
788 * search for key in the extent_buffer. The items start at offset p,
789 * and they are item_size apart. There are 'max' items in p.
791 * The slot in the array is returned via slot, and it points to
792 * the place where you would insert key if it is not found in the array.
795 * Slot may point to max if the key is bigger than all of the keys currently in the array.
797 static noinline int generic_bin_search(struct extent_buffer *eb,
799 int item_size, struct btrfs_key *key,
806 struct btrfs_disk_key *tmp = NULL;
807 struct btrfs_disk_key unaligned;
808 unsigned long offset;
810 unsigned long map_start = 0;
811 unsigned long map_len = 0;
815 mid = (low + high) / 2;
816 offset = p + mid * item_size;
818 if (!kaddr || offset < map_start ||
819 (offset + sizeof(struct btrfs_disk_key)) >
820 map_start + map_len) {
822 err = map_private_extent_buffer(eb, offset,
823 sizeof(struct btrfs_disk_key),
824 &kaddr, &map_start, &map_len);
827 tmp = (struct btrfs_disk_key *)(kaddr + offset -
830 read_extent_buffer(eb, &unaligned,
831 offset, sizeof(unaligned));
836 tmp = (struct btrfs_disk_key *)(kaddr + offset -
839 ret = comp_keys(tmp, key);
855 * simple bin_search frontend that does the right thing for leaves vs nodes
858 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
859 int level, int *slot)
862 return generic_bin_search(eb,
863 offsetof(struct btrfs_leaf, items),
864 sizeof(struct btrfs_item),
865 key, btrfs_header_nritems(eb),
868 return generic_bin_search(eb,
869 offsetof(struct btrfs_node, ptrs),
870 sizeof(struct btrfs_key_ptr),
871 key, btrfs_header_nritems(eb),
875 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
876 int level, int *slot)
878 return bin_search(eb, key, level, slot);
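
/*
 * Illustrative sketch, not part of the original file: the core loop of
 * generic_bin_search() with the extent_buffer page-mapping details stripped
 * out.  'base' points at 'max' records of 'item_size' bytes, each starting
 * with a btrfs_disk_key.  Returns 0 with *slot set on an exact match, or 1
 * with *slot at the insertion point.
 */
static int __maybe_unused example_bin_search(void *base, int item_size,
					     struct btrfs_key *key,
					     int max, int *slot)
{
	int low = 0;
	int high = max;

	while (low < high) {
		int mid = (low + high) / 2;
		struct btrfs_disk_key *tmp = base + mid * item_size;
		int ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}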
881 static void root_add_used(struct btrfs_root *root, u32 size)
883 spin_lock(&root->accounting_lock);
884 btrfs_set_root_used(&root->root_item,
885 btrfs_root_used(&root->root_item) + size);
886 spin_unlock(&root->accounting_lock);
889 static void root_sub_used(struct btrfs_root *root, u32 size)
891 spin_lock(&root->accounting_lock);
892 btrfs_set_root_used(&root->root_item,
893 btrfs_root_used(&root->root_item) - size);
894 spin_unlock(&root->accounting_lock);
897 /* given a node and slot number, this reads the block it points to. The
898 * extent buffer is returned with a reference taken (but unlocked).
899 * NULL is returned on error.
901 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
902 struct extent_buffer *parent, int slot)
904 int level = btrfs_header_level(parent);
907 if (slot >= btrfs_header_nritems(parent))
912 return read_tree_block(root, btrfs_node_blockptr(parent, slot),
913 btrfs_level_size(root, level - 1),
914 btrfs_node_ptr_generation(parent, slot));
918 * node level balancing, used to make sure nodes are in proper order for
919 * item deletion. We balance from the top down, so we have to make sure
920 * that a deletion won't leave a node completely empty later on.
922 static noinline int balance_level(struct btrfs_trans_handle *trans,
923 struct btrfs_root *root,
924 struct btrfs_path *path, int level)
926 struct extent_buffer *right = NULL;
927 struct extent_buffer *mid;
928 struct extent_buffer *left = NULL;
929 struct extent_buffer *parent = NULL;
933 int orig_slot = path->slots[level];
939 mid = path->nodes[level];
941 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
942 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
943 WARN_ON(btrfs_header_generation(mid) != trans->transid);
945 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
947 if (level < BTRFS_MAX_LEVEL - 1) {
948 parent = path->nodes[level + 1];
949 pslot = path->slots[level + 1];
953 * deal with the case where there is only one pointer in the root
954 * by promoting the node below to a root
957 struct extent_buffer *child;
959 if (btrfs_header_nritems(mid) != 1)
962 /* promote the child to a root */
963 child = read_node_slot(root, mid, 0);
966 btrfs_std_error(root->fs_info, ret);
970 btrfs_tree_lock(child);
971 btrfs_set_lock_blocking(child);
972 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
974 btrfs_tree_unlock(child);
975 free_extent_buffer(child);
979 rcu_assign_pointer(root->node, child);
981 add_root_to_dirty_list(root);
982 btrfs_tree_unlock(child);
984 path->locks[level] = 0;
985 path->nodes[level] = NULL;
986 clean_tree_block(trans, root, mid);
987 btrfs_tree_unlock(mid);
988 /* once for the path */
989 free_extent_buffer(mid);
991 root_sub_used(root, mid->len);
992 btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
993 /* once for the root ptr */
994 free_extent_buffer_stale(mid);
997 if (btrfs_header_nritems(mid) >
998 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1001 btrfs_header_nritems(mid);
1003 left = read_node_slot(root, parent, pslot - 1);
1005 btrfs_tree_lock(left);
1006 btrfs_set_lock_blocking(left);
1007 wret = btrfs_cow_block(trans, root, left,
1008 parent, pslot - 1, &left);
1014 right = read_node_slot(root, parent, pslot + 1);
1016 btrfs_tree_lock(right);
1017 btrfs_set_lock_blocking(right);
1018 wret = btrfs_cow_block(trans, root, right,
1019 parent, pslot + 1, &right);
1026 /* first, try to make some room in the middle buffer */
1028 orig_slot += btrfs_header_nritems(left);
1029 wret = push_node_left(trans, root, left, mid, 1);
1032 btrfs_header_nritems(mid);
1036 * then try to empty the right most buffer into the middle
1039 wret = push_node_left(trans, root, mid, right, 1);
1040 if (wret < 0 && wret != -ENOSPC)
1042 if (btrfs_header_nritems(right) == 0) {
1043 clean_tree_block(trans, root, right);
1044 btrfs_tree_unlock(right);
1045 del_ptr(trans, root, path, level + 1, pslot + 1);
1046 root_sub_used(root, right->len);
1047 btrfs_free_tree_block(trans, root, right, 0, 1, 0);
1048 free_extent_buffer_stale(right);
1051 struct btrfs_disk_key right_key;
1052 btrfs_node_key(right, &right_key, 0);
1053 btrfs_set_node_key(parent, &right_key, pslot + 1);
1054 btrfs_mark_buffer_dirty(parent);
1057 if (btrfs_header_nritems(mid) == 1) {
1059 * we're not allowed to leave a node with one item in the
1060 * tree during a delete. A deletion from lower in the tree
1061 * could try to delete the only pointer in this node.
1062 * So, pull some keys from the left.
1063 * There has to be a left pointer at this point because
1064 * otherwise we would have pulled some pointers from the right.
1069 btrfs_std_error(root->fs_info, ret);
1072 wret = balance_node_right(trans, root, mid, left);
1078 wret = push_node_left(trans, root, left, mid, 1);
1084 if (btrfs_header_nritems(mid) == 0) {
1085 clean_tree_block(trans, root, mid);
1086 btrfs_tree_unlock(mid);
1087 del_ptr(trans, root, path, level + 1, pslot);
1088 root_sub_used(root, mid->len);
1089 btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
1090 free_extent_buffer_stale(mid);
1093 /* update the parent key to reflect our changes */
1094 struct btrfs_disk_key mid_key;
1095 btrfs_node_key(mid, &mid_key, 0);
1096 btrfs_set_node_key(parent, &mid_key, pslot);
1097 btrfs_mark_buffer_dirty(parent);
1100 /* update the path */
1102 if (btrfs_header_nritems(left) > orig_slot) {
1103 extent_buffer_get(left);
1104 /* left was locked after cow */
1105 path->nodes[level] = left;
1106 path->slots[level + 1] -= 1;
1107 path->slots[level] = orig_slot;
1109 btrfs_tree_unlock(mid);
1110 free_extent_buffer(mid);
1113 orig_slot -= btrfs_header_nritems(left);
1114 path->slots[level] = orig_slot;
1117 /* double check we haven't messed things up */
1119 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1123 btrfs_tree_unlock(right);
1124 free_extent_buffer(right);
1127 if (path->nodes[level] != left)
1128 btrfs_tree_unlock(left);
1129 free_extent_buffer(left);
1134 /* Node balancing for insertion. Here we only split or push nodes around
1135 * when they are completely full. This is also done top down, so we
1136 * have to be pessimistic.
1138 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1139 struct btrfs_root *root,
1140 struct btrfs_path *path, int level)
1142 struct extent_buffer *right = NULL;
1143 struct extent_buffer *mid;
1144 struct extent_buffer *left = NULL;
1145 struct extent_buffer *parent = NULL;
1149 int orig_slot = path->slots[level];
1154 mid = path->nodes[level];
1155 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1157 if (level < BTRFS_MAX_LEVEL - 1) {
1158 parent = path->nodes[level + 1];
1159 pslot = path->slots[level + 1];
1165 left = read_node_slot(root, parent, pslot - 1);
1167 /* first, try to make some room in the middle buffer */
1171 btrfs_tree_lock(left);
1172 btrfs_set_lock_blocking(left);
1174 left_nr = btrfs_header_nritems(left);
1175 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1178 ret = btrfs_cow_block(trans, root, left, parent,
1183 wret = push_node_left(trans, root,
1190 struct btrfs_disk_key disk_key;
1191 orig_slot += left_nr;
1192 btrfs_node_key(mid, &disk_key, 0);
1193 btrfs_set_node_key(parent, &disk_key, pslot);
1194 btrfs_mark_buffer_dirty(parent);
1195 if (btrfs_header_nritems(left) > orig_slot) {
1196 path->nodes[level] = left;
1197 path->slots[level + 1] -= 1;
1198 path->slots[level] = orig_slot;
1199 btrfs_tree_unlock(mid);
1200 free_extent_buffer(mid);
1203 btrfs_header_nritems(left);
1204 path->slots[level] = orig_slot;
1205 btrfs_tree_unlock(left);
1206 free_extent_buffer(left);
1210 btrfs_tree_unlock(left);
1211 free_extent_buffer(left);
1213 right = read_node_slot(root, parent, pslot + 1);
1216 * then try to empty the right most buffer into the middle
1221 btrfs_tree_lock(right);
1222 btrfs_set_lock_blocking(right);
1224 right_nr = btrfs_header_nritems(right);
1225 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1228 ret = btrfs_cow_block(trans, root, right,
1234 wret = balance_node_right(trans, root,
1241 struct btrfs_disk_key disk_key;
1243 btrfs_node_key(right, &disk_key, 0);
1244 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1245 btrfs_mark_buffer_dirty(parent);
1247 if (btrfs_header_nritems(mid) <= orig_slot) {
1248 path->nodes[level] = right;
1249 path->slots[level + 1] += 1;
1250 path->slots[level] = orig_slot -
1251 btrfs_header_nritems(mid);
1252 btrfs_tree_unlock(mid);
1253 free_extent_buffer(mid);
1255 btrfs_tree_unlock(right);
1256 free_extent_buffer(right);
1260 btrfs_tree_unlock(right);
1261 free_extent_buffer(right);
1267 * readahead one full node of leaves, finding things that are close
1268 * to the block in 'slot', and triggering ra on them.
1270 static void reada_for_search(struct btrfs_root *root,
1271 struct btrfs_path *path,
1272 int level, int slot, u64 objectid)
1274 struct extent_buffer *node;
1275 struct btrfs_disk_key disk_key;
1281 int direction = path->reada;
1282 struct extent_buffer *eb;
1290 if (!path->nodes[level])
1293 node = path->nodes[level];
1295 search = btrfs_node_blockptr(node, slot);
1296 blocksize = btrfs_level_size(root, level - 1);
1297 eb = btrfs_find_tree_block(root, search, blocksize);
1299 free_extent_buffer(eb);
1305 nritems = btrfs_header_nritems(node);
1309 if (direction < 0) {
1313 } else if (direction > 0) {
1318 if (path->reada < 0 && objectid) {
1319 btrfs_node_key(node, &disk_key, nr);
1320 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1323 search = btrfs_node_blockptr(node, nr);
1324 if ((search <= target && target - search <= 65536) ||
1325 (search > target && search - target <= 65536)) {
1326 gen = btrfs_node_ptr_generation(node, nr);
1327 readahead_tree_block(root, search, blocksize, gen);
1331 if ((nread > 65536 || nscan > 32))
1337 * returns -EAGAIN if it had to drop the path, or zero if everything was in cache.
1340 static noinline int reada_for_balance(struct btrfs_root *root,
1341 struct btrfs_path *path, int level)
1345 struct extent_buffer *parent;
1346 struct extent_buffer *eb;
1353 parent = path->nodes[level + 1];
1357 nritems = btrfs_header_nritems(parent);
1358 slot = path->slots[level + 1];
1359 blocksize = btrfs_level_size(root, level);
1362 block1 = btrfs_node_blockptr(parent, slot - 1);
1363 gen = btrfs_node_ptr_generation(parent, slot - 1);
1364 eb = btrfs_find_tree_block(root, block1, blocksize);
1366 * if we get -eagain from btrfs_buffer_uptodate, we
1367 * don't want to return eagain here. That will loop forever.
1370 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
1372 free_extent_buffer(eb);
1374 if (slot + 1 < nritems) {
1375 block2 = btrfs_node_blockptr(parent, slot + 1);
1376 gen = btrfs_node_ptr_generation(parent, slot + 1);
1377 eb = btrfs_find_tree_block(root, block2, blocksize);
1378 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
1380 free_extent_buffer(eb);
1382 if (block1 || block2) {
1385 /* release the whole path */
1386 btrfs_release_path(path);
1388 /* read the blocks */
1390 readahead_tree_block(root, block1, blocksize, 0);
1392 readahead_tree_block(root, block2, blocksize, 0);
1395 eb = read_tree_block(root, block1, blocksize, 0);
1396 free_extent_buffer(eb);
1399 eb = read_tree_block(root, block2, blocksize, 0);
1400 free_extent_buffer(eb);
1408 * when we walk down the tree, it is usually safe to unlock the higher layers
1409 * in the tree. The exceptions are when our path goes through slot 0, because
1410 * operations on the tree might require changing key pointers higher up in the tree.
1413 * callers might also have set path->keep_locks, which tells this code to keep
1414 * the lock if the path points to the last slot in the block. This is part of
1415 * walking through the tree, and selecting the next slot in the higher block.
1417 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. So
1418 * if lowest_unlock is 1, level 0 won't be unlocked
1420 static noinline void unlock_up(struct btrfs_path *path, int level,
1421 int lowest_unlock, int min_write_lock_level,
1422 int *write_lock_level)
1425 int skip_level = level;
1427 struct extent_buffer *t;
1429 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1430 if (!path->nodes[i])
1432 if (!path->locks[i])
1434 if (!no_skips && path->slots[i] == 0) {
1438 if (!no_skips && path->keep_locks) {
1441 nritems = btrfs_header_nritems(t);
1442 if (nritems < 1 || path->slots[i] >= nritems - 1) {
1447 if (skip_level < i && i >= lowest_unlock)
1451 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
1452 btrfs_tree_unlock_rw(t, path->locks[i]);
1454 if (write_lock_level &&
1455 i > min_write_lock_level &&
1456 i <= *write_lock_level) {
1457 *write_lock_level = i - 1;
1464 * This releases any locks held in the path starting at level and
1465 * going all the way up to the root.
1467 * btrfs_search_slot will keep the lock held on higher nodes in a few
1468 * corner cases, such as COW of the block at slot zero in the node. This
1469 * ignores those rules, and it should only be called when there are no
1470 * more updates to be done higher up in the tree.
1472 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
1476 if (path->keep_locks)
1479 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1480 if (!path->nodes[i])
1482 if (!path->locks[i])
1484 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
1490 * helper function for btrfs_search_slot. The goal is to find a block
1491 * in cache without setting the path to blocking. If we find the block
1492 * we return zero and the path is unchanged.
1494 * If we can't find the block, we set the path blocking and do some
1495 * reada. -EAGAIN is returned and the search must be repeated.
1498 read_block_for_search(struct btrfs_trans_handle *trans,
1499 struct btrfs_root *root, struct btrfs_path *p,
1500 struct extent_buffer **eb_ret, int level, int slot,
1501 struct btrfs_key *key)
1506 struct extent_buffer *b = *eb_ret;
1507 struct extent_buffer *tmp;
1510 blocknr = btrfs_node_blockptr(b, slot);
1511 gen = btrfs_node_ptr_generation(b, slot);
1512 blocksize = btrfs_level_size(root, level - 1);
1514 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
1516 /* first we do an atomic uptodate check */
1517 if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
1518 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
1520 * we found an up to date block without sleeping, return right away.
1527 /* the pages were up to date, but we failed
1528 * the generation number check. Do a full
1529 * read for the generation number that is correct.
1530 * We must do this without dropping locks so
1531 * we can trust our generation number
1533 free_extent_buffer(tmp);
1534 btrfs_set_path_blocking(p);
1536 /* now we're allowed to do a blocking uptodate check */
1537 tmp = read_tree_block(root, blocknr, blocksize, gen);
1538 if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
1542 free_extent_buffer(tmp);
1543 btrfs_release_path(p);
1549 * reduce lock contention at high levels
1550 * of the btree by dropping locks before
1551 * we read. Don't release the lock on the current
1552 * level because we need to walk this node to figure
1553 * out which blocks to read.
1555 btrfs_unlock_up_safe(p, level + 1);
1556 btrfs_set_path_blocking(p);
1558 free_extent_buffer(tmp);
1560 reada_for_search(root, p, level, slot, key->objectid);
1562 btrfs_release_path(p);
1565 tmp = read_tree_block(root, blocknr, blocksize, 0);
1568 * If the read above didn't mark this buffer up to date,
1569 * it will never end up being up to date. Set ret to EIO now
1570 * and give up so that our caller doesn't loop forever
1573 if (!btrfs_buffer_uptodate(tmp, 0, 0))
1575 free_extent_buffer(tmp);
1581 * helper function for btrfs_search_slot. This does all of the checks
1582 * for node-level blocks and does any balancing required based on the ins_len.
1585 * If no extra work was required, zero is returned. If we had to
1586 * drop the path, -EAGAIN is returned and btrfs_search_slot must loop around and try again.
1590 setup_nodes_for_search(struct btrfs_trans_handle *trans,
1591 struct btrfs_root *root, struct btrfs_path *p,
1592 struct extent_buffer *b, int level, int ins_len,
1593 int *write_lock_level)
1596 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
1597 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
1600 if (*write_lock_level < level + 1) {
1601 *write_lock_level = level + 1;
1602 btrfs_release_path(p);
1606 sret = reada_for_balance(root, p, level);
1610 btrfs_set_path_blocking(p);
1611 sret = split_node(trans, root, p, level);
1612 btrfs_clear_path_blocking(p, NULL, 0);
1619 b = p->nodes[level];
1620 } else if (ins_len < 0 && btrfs_header_nritems(b) <
1621 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
1624 if (*write_lock_level < level + 1) {
1625 *write_lock_level = level + 1;
1626 btrfs_release_path(p);
1630 sret = reada_for_balance(root, p, level);
1634 btrfs_set_path_blocking(p);
1635 sret = balance_level(trans, root, p, level);
1636 btrfs_clear_path_blocking(p, NULL, 0);
1642 b = p->nodes[level];
1644 btrfs_release_path(p);
1647 BUG_ON(btrfs_header_nritems(b) == 1);
1658 * look for key in the tree. path is filled in with nodes along the way.
1659 * if key is found, we return zero and you can find the item in the leaf
1660 * level of the path (level 0).
1662 * If the key isn't found, the path points to the slot where it should
1663 * be inserted, and 1 is returned. If there are other errors during the
1664 * search a negative error number is returned.
1666 * if ins_len > 0, nodes and leaves will be split as we walk down the
1667 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if possible).
1670 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
1671 *root, struct btrfs_key *key, struct btrfs_path *p, int
1674 struct extent_buffer *b;
1679 int lowest_unlock = 1;
1681 /* everything at write_lock_level or lower must be write locked */
1682 int write_lock_level = 0;
1683 u8 lowest_level = 0;
1684 int min_write_lock_level;
1686 lowest_level = p->lowest_level;
1687 WARN_ON(lowest_level && ins_len > 0);
1688 WARN_ON(p->nodes[0] != NULL);
1693 /* when we are removing items, we might have to go up to level
1694 * two as we update tree pointers. Make sure we keep write
1695 * locks for those levels as well
1697 write_lock_level = 2;
1698 } else if (ins_len > 0) {
1700 * for inserting items, make sure we have a write lock on
1701 * level 1 so we can update keys
1703 write_lock_level = 1;
1707 write_lock_level = -1;
1709 if (cow && (p->keep_locks || p->lowest_level))
1710 write_lock_level = BTRFS_MAX_LEVEL;
1712 min_write_lock_level = write_lock_level;
1716 * we try very hard to do read locks on the root
1718 root_lock = BTRFS_READ_LOCK;
1720 if (p->search_commit_root) {
1722 * the commit roots are read only
1723 * so we always do read locks
1725 b = root->commit_root;
1726 extent_buffer_get(b);
1727 level = btrfs_header_level(b);
1728 if (!p->skip_locking)
1729 btrfs_tree_read_lock(b);
1731 if (p->skip_locking) {
1732 b = btrfs_root_node(root);
1733 level = btrfs_header_level(b);
1735 /* we don't know the level of the root node
1736 * until we actually have it read locked
1738 b = btrfs_read_lock_root_node(root);
1739 level = btrfs_header_level(b);
1740 if (level <= write_lock_level) {
1741 /* whoops, must trade for write lock */
1742 btrfs_tree_read_unlock(b);
1743 free_extent_buffer(b);
1744 b = btrfs_lock_root_node(root);
1745 root_lock = BTRFS_WRITE_LOCK;
1747 /* the level might have changed, check again */
1748 level = btrfs_header_level(b);
1752 p->nodes[level] = b;
1753 if (!p->skip_locking)
1754 p->locks[level] = root_lock;
1757 level = btrfs_header_level(b);
1760 * setup the path here so we can release it under lock
1761 * contention with the cow code
1765 * if we don't really need to cow this block
1766 * then we don't want to set the path blocking,
1767 * so we test it here
1769 if (!should_cow_block(trans, root, b))
1772 btrfs_set_path_blocking(p);
1775 * must have write locks on this node and the parent.
1778 if (level + 1 > write_lock_level) {
1779 write_lock_level = level + 1;
1780 btrfs_release_path(p);
1784 err = btrfs_cow_block(trans, root, b,
1785 p->nodes[level + 1],
1786 p->slots[level + 1], &b);
1793 BUG_ON(!cow && ins_len);
1795 p->nodes[level] = b;
1796 btrfs_clear_path_blocking(p, NULL, 0);
1799 * we have a lock on b and as long as we aren't changing
1800 * the tree, there is no way for the items in b to change.
1801 * It is safe to drop the lock on our parent before we
1802 * go through the expensive btree search on b.
1804 * If cow is true, then we might be changing slot zero,
1805 * which may require changing the parent. So, we can't
1806 * drop the lock until after we know which slot we're operating on.
1810 btrfs_unlock_up_safe(p, level + 1);
1812 ret = bin_search(b, key, level, &slot);
1816 if (ret && slot > 0) {
1820 p->slots[level] = slot;
1821 err = setup_nodes_for_search(trans, root, p, b, level,
1822 ins_len, &write_lock_level);
1829 b = p->nodes[level];
1830 slot = p->slots[level];
1833 * slot 0 is special, if we change the key
1834 * we have to update the parent pointer
1835 * which means we must have a write lock on the parent.
1838 if (slot == 0 && cow &&
1839 write_lock_level < level + 1) {
1840 write_lock_level = level + 1;
1841 btrfs_release_path(p);
1845 unlock_up(p, level, lowest_unlock,
1846 min_write_lock_level, &write_lock_level);
1848 if (level == lowest_level) {
1854 err = read_block_for_search(trans, root, p,
1855 &b, level, slot, key);
1863 if (!p->skip_locking) {
1864 level = btrfs_header_level(b);
1865 if (level <= write_lock_level) {
1866 err = btrfs_try_tree_write_lock(b);
1868 btrfs_set_path_blocking(p);
1870 btrfs_clear_path_blocking(p, b,
1873 p->locks[level] = BTRFS_WRITE_LOCK;
1875 err = btrfs_try_tree_read_lock(b);
1877 btrfs_set_path_blocking(p);
1878 btrfs_tree_read_lock(b);
1879 btrfs_clear_path_blocking(p, b,
1882 p->locks[level] = BTRFS_READ_LOCK;
1884 p->nodes[level] = b;
1887 p->slots[level] = slot;
1889 btrfs_leaf_free_space(root, b) < ins_len) {
1890 if (write_lock_level < 1) {
1891 write_lock_level = 1;
1892 btrfs_release_path(p);
1896 btrfs_set_path_blocking(p);
1897 err = split_leaf(trans, root, key,
1898 p, ins_len, ret == 0);
1899 btrfs_clear_path_blocking(p, NULL, 0);
1907 if (!p->search_for_split)
1908 unlock_up(p, level, lowest_unlock,
1909 min_write_lock_level, &write_lock_level);
1916 * we don't really know what they plan on doing with the path
1917 * from here on, so for now just mark it as blocking
1919 if (!p->leave_spinning)
1920 btrfs_set_path_blocking(p);
1922 btrfs_release_path(p);
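
/*
 * Illustrative sketch, not part of the original file: a typical read-only
 * lookup with btrfs_search_slot().  With a NULL transaction, ins_len == 0
 * and cow == 0 nothing is modified; ret == 0 means the key was found and
 * path->nodes[0] / path->slots[0] point at it, ret > 0 means the path
 * points at the slot where the key would be inserted.
 */
static int __maybe_unused example_lookup_item(struct btrfs_root *root,
					      struct btrfs_key *key)
{
	struct btrfs_path *path;
	struct btrfs_key found;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], &found, path->slots[0]);

	btrfs_free_path(path);
	return ret;
}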
1927 * adjust the pointers going up the tree, starting at level
1928 * making sure the right key of each node points to 'key'.
1929 * This is used after shifting pointers to the left, so it stops
1930 * fixing up pointers when a given leaf/node is not in slot 0 of the higher levels.
1934 static void fixup_low_keys(struct btrfs_trans_handle *trans,
1935 struct btrfs_root *root, struct btrfs_path *path,
1936 struct btrfs_disk_key *key, int level)
1939 struct extent_buffer *t;
1941 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1942 int tslot = path->slots[i];
1943 if (!path->nodes[i])
1946 btrfs_set_node_key(t, key, tslot);
1947 btrfs_mark_buffer_dirty(path->nodes[i]);
1956 * This function isn't completely safe. It's the caller's responsibility
1957 * that the new key won't break the order
1959 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
1960 struct btrfs_root *root, struct btrfs_path *path,
1961 struct btrfs_key *new_key)
1963 struct btrfs_disk_key disk_key;
1964 struct extent_buffer *eb;
1967 eb = path->nodes[0];
1968 slot = path->slots[0];
1970 btrfs_item_key(eb, &disk_key, slot - 1);
1971 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
1973 if (slot < btrfs_header_nritems(eb) - 1) {
1974 btrfs_item_key(eb, &disk_key, slot + 1);
1975 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
1978 btrfs_cpu_key_to_disk(&disk_key, new_key);
1979 btrfs_set_item_key(eb, &disk_key, slot);
1980 btrfs_mark_buffer_dirty(eb);
1982 fixup_low_keys(trans, root, path, &disk_key, 1);
1986 * try to push data from one node into the next node left in the tree.
1989 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
1990 * error, and > 0 if there was no room in the left hand block.
1992 static int push_node_left(struct btrfs_trans_handle *trans,
1993 struct btrfs_root *root, struct extent_buffer *dst,
1994 struct extent_buffer *src, int empty)
2001 src_nritems = btrfs_header_nritems(src);
2002 dst_nritems = btrfs_header_nritems(dst);
2003 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2004 WARN_ON(btrfs_header_generation(src) != trans->transid);
2005 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2007 if (!empty && src_nritems <= 8)
2010 if (push_items <= 0)
2014 push_items = min(src_nritems, push_items);
2015 if (push_items < src_nritems) {
2016 /* leave at least 8 pointers in the node if
2017 * we aren't going to empty it
2019 if (src_nritems - push_items < 8) {
2020 if (push_items <= 8)
2026 push_items = min(src_nritems - 8, push_items);
2028 copy_extent_buffer(dst, src,
2029 btrfs_node_key_ptr_offset(dst_nritems),
2030 btrfs_node_key_ptr_offset(0),
2031 push_items * sizeof(struct btrfs_key_ptr));
2033 if (push_items < src_nritems) {
2034 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2035 btrfs_node_key_ptr_offset(push_items),
2036 (src_nritems - push_items) *
2037 sizeof(struct btrfs_key_ptr));
2039 btrfs_set_header_nritems(src, src_nritems - push_items);
2040 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2041 btrfs_mark_buffer_dirty(src);
2042 btrfs_mark_buffer_dirty(dst);
2048 * try to push data from one node into the next node right in the tree.
2051 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2052 * error, and > 0 if there was no room in the right hand block.
2054 * this will only push up to 1/2 the contents of the left node over
2056 static int balance_node_right(struct btrfs_trans_handle *trans,
2057 struct btrfs_root *root,
2058 struct extent_buffer *dst,
2059 struct extent_buffer *src)
2067 WARN_ON(btrfs_header_generation(src) != trans->transid);
2068 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2070 src_nritems = btrfs_header_nritems(src);
2071 dst_nritems = btrfs_header_nritems(dst);
2072 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2073 if (push_items <= 0)
2076 if (src_nritems < 4)
2079 max_push = src_nritems / 2 + 1;
2080 /* don't try to empty the node */
2081 if (max_push >= src_nritems)
2084 if (max_push < push_items)
2085 push_items = max_push;
2087 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2088 btrfs_node_key_ptr_offset(0),
2090 sizeof(struct btrfs_key_ptr));
2092 copy_extent_buffer(dst, src,
2093 btrfs_node_key_ptr_offset(0),
2094 btrfs_node_key_ptr_offset(src_nritems - push_items),
2095 push_items * sizeof(struct btrfs_key_ptr));
2097 btrfs_set_header_nritems(src, src_nritems - push_items);
2098 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2100 btrfs_mark_buffer_dirty(src);
2101 btrfs_mark_buffer_dirty(dst);
2107 * helper function to insert a new root level in the tree.
2108 * A new node is allocated, and a single item is inserted to
2109 * point to the existing root
2111 * returns zero on success or < 0 on failure.
2113 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2114 struct btrfs_root *root,
2115 struct btrfs_path *path, int level)
2118 struct extent_buffer *lower;
2119 struct extent_buffer *c;
2120 struct extent_buffer *old;
2121 struct btrfs_disk_key lower_key;
2123 BUG_ON(path->nodes[level]);
2124 BUG_ON(path->nodes[level-1] != root->node);
2126 lower = path->nodes[level-1];
2128 btrfs_item_key(lower, &lower_key, 0);
2130 btrfs_node_key(lower, &lower_key, 0);
2132 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
2133 root->root_key.objectid, &lower_key,
2134 level, root->node->start, 0, 0);
2138 root_add_used(root, root->nodesize);
2140 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
2141 btrfs_set_header_nritems(c, 1);
2142 btrfs_set_header_level(c, level);
2143 btrfs_set_header_bytenr(c, c->start);
2144 btrfs_set_header_generation(c, trans->transid);
2145 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
2146 btrfs_set_header_owner(c, root->root_key.objectid);
2148 write_extent_buffer(c, root->fs_info->fsid,
2149 (unsigned long)btrfs_header_fsid(c),
2152 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
2153 (unsigned long)btrfs_header_chunk_tree_uuid(c),
2156 btrfs_set_node_key(c, &lower_key, 0);
2157 btrfs_set_node_blockptr(c, 0, lower->start);
2158 lower_gen = btrfs_header_generation(lower);
2159 WARN_ON(lower_gen != trans->transid);
2161 btrfs_set_node_ptr_generation(c, 0, lower_gen);
2163 btrfs_mark_buffer_dirty(c);
2166 rcu_assign_pointer(root->node, c);
2168 /* the super has an extra ref to root->node */
2169 free_extent_buffer(old);
2171 add_root_to_dirty_list(root);
2172 extent_buffer_get(c);
2173 path->nodes[level] = c;
2174 path->locks[level] = BTRFS_WRITE_LOCK;
2175 path->slots[level] = 0;
2180 * worker function to insert a single pointer in a node.
2181 * the node should have enough room for the pointer already
2183 * slot and level indicate where you want the key to go, and
2184 * blocknr is the block the key points to.
2186 static void insert_ptr(struct btrfs_trans_handle *trans,
2187 struct btrfs_root *root, struct btrfs_path *path,
2188 struct btrfs_disk_key *key, u64 bytenr,
2189 int slot, int level)
2191 struct extent_buffer *lower;
2194 BUG_ON(!path->nodes[level]);
2195 btrfs_assert_tree_locked(path->nodes[level]);
2196 lower = path->nodes[level];
2197 nritems = btrfs_header_nritems(lower);
2198 BUG_ON(slot > nritems);
2199 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
2200 if (slot != nritems) {
2201 memmove_extent_buffer(lower,
2202 btrfs_node_key_ptr_offset(slot + 1),
2203 btrfs_node_key_ptr_offset(slot),
2204 (nritems - slot) * sizeof(struct btrfs_key_ptr));
2206 btrfs_set_node_key(lower, key, slot);
2207 btrfs_set_node_blockptr(lower, slot, bytenr);
2208 WARN_ON(trans->transid == 0);
2209 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2210 btrfs_set_header_nritems(lower, nritems + 1);
2211 btrfs_mark_buffer_dirty(lower);
2215 * split the node at the specified level in path in two.
2216 * The path is corrected to point to the appropriate node after the split.
2218 * Before splitting this tries to make some room in the node by pushing
2219 * left and right, if either one works, it returns right away.
2221 * returns 0 on success and < 0 on failure
2223 static noinline int split_node(struct btrfs_trans_handle *trans,
2224 struct btrfs_root *root,
2225 struct btrfs_path *path, int level)
2227 struct extent_buffer *c;
2228 struct extent_buffer *split;
2229 struct btrfs_disk_key disk_key;
2234 c = path->nodes[level];
2235 WARN_ON(btrfs_header_generation(c) != trans->transid);
2236 if (c == root->node) {
2237 /* trying to split the root, let's make a new one */
2238 ret = insert_new_root(trans, root, path, level + 1);
2242 ret = push_nodes_for_insert(trans, root, path, level);
2243 c = path->nodes[level];
2244 if (!ret && btrfs_header_nritems(c) <
2245 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
2251 c_nritems = btrfs_header_nritems(c);
2252 mid = (c_nritems + 1) / 2;
2253 btrfs_node_key(c, &disk_key, mid);
2255 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
2256 root->root_key.objectid,
2257 &disk_key, level, c->start, 0, 0);
2259 return PTR_ERR(split);
2261 root_add_used(root, root->nodesize);
2263 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
2264 btrfs_set_header_level(split, btrfs_header_level(c));
2265 btrfs_set_header_bytenr(split, split->start);
2266 btrfs_set_header_generation(split, trans->transid);
2267 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
2268 btrfs_set_header_owner(split, root->root_key.objectid);
2269 write_extent_buffer(split, root->fs_info->fsid,
2270 (unsigned long)btrfs_header_fsid(split),
2272 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
2273 (unsigned long)btrfs_header_chunk_tree_uuid(split),
2277 copy_extent_buffer(split, c,
2278 btrfs_node_key_ptr_offset(0),
2279 btrfs_node_key_ptr_offset(mid),
2280 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2281 btrfs_set_header_nritems(split, c_nritems - mid);
2282 btrfs_set_header_nritems(c, mid);
2285 btrfs_mark_buffer_dirty(c);
2286 btrfs_mark_buffer_dirty(split);
2288 insert_ptr(trans, root, path, &disk_key, split->start,
2289 path->slots[level + 1] + 1, level + 1);
2291 if (path->slots[level] >= mid) {
2292 path->slots[level] -= mid;
2293 btrfs_tree_unlock(c);
2294 free_extent_buffer(c);
2295 path->nodes[level] = split;
2296 path->slots[level + 1] += 1;
2298 btrfs_tree_unlock(split);
2299 free_extent_buffer(split);
2305 * how many bytes are required to store the items in a leaf. start
2306 * and nr indicate which items in the leaf to check. This totals up the
2307 * space used both by the item structs and the item data
2309 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
2312 int nritems = btrfs_header_nritems(l);
2313 int end = min(nritems, start + nr) - 1;
2317 data_len = btrfs_item_end_nr(l, start);
2318 data_len = data_len - btrfs_item_offset_nr(l, end);
2319 data_len += sizeof(struct btrfs_item) * nr;
2320 WARN_ON(data_len < 0);
2325 * The space between the end of the leaf items and
2326 * the start of the leaf data. IOW, how much room
2327 * the leaf has left for both items and data
2329 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
2330 struct extent_buffer *leaf)
2332 int nritems = btrfs_header_nritems(leaf);
2334 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
2336 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
2337 "used %d nritems %d\n",
2338 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
2339 leaf_space_used(leaf, 0, nritems), nritems);
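
/*
 * Illustrative sketch, not part of the original file: the arithmetic behind
 * leaf_space_used() and btrfs_leaf_free_space().  Every item costs one
 * struct btrfs_item header plus its data bytes, and whatever is left of the
 * leaf data area is free.
 */
static u32 __maybe_unused example_leaf_free_space(struct btrfs_root *root,
						  u32 nritems, u32 data_bytes)
{
	u32 used = nritems * sizeof(struct btrfs_item) + data_bytes;

	return BTRFS_LEAF_DATA_SIZE(root) - used;
}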
2345 * min slot controls the lowest index we're willing to push to the
2346 * right. We'll push up to and including min_slot, but no lower
2348 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
2349 struct btrfs_root *root,
2350 struct btrfs_path *path,
2351 int data_size, int empty,
2352 struct extent_buffer *right,
2353 int free_space, u32 left_nritems,
2356 struct extent_buffer *left = path->nodes[0];
2357 struct extent_buffer *upper = path->nodes[1];
2358 struct btrfs_map_token token;
2359 struct btrfs_disk_key disk_key;
2364 struct btrfs_item *item;
2370 btrfs_init_map_token(&token);
2375 nr = max_t(u32, 1, min_slot);
2377 if (path->slots[0] >= left_nritems)
2378 push_space += data_size;
2380 slot = path->slots[1];
2381 i = left_nritems - 1;
2383 item = btrfs_item_nr(left, i);
2385 if (!empty && push_items > 0) {
2386 if (path->slots[0] > i)
2388 if (path->slots[0] == i) {
2389 int space = btrfs_leaf_free_space(root, left);
2390 if (space + push_space * 2 > free_space)
2395 if (path->slots[0] == i)
2396 push_space += data_size;
2398 this_item_size = btrfs_item_size(left, item);
2399 if (this_item_size + sizeof(*item) + push_space > free_space)
2403 push_space += this_item_size + sizeof(*item);
2409 if (push_items == 0)
2412 if (!empty && push_items == left_nritems)
2415 /* push left to right */
2416 right_nritems = btrfs_header_nritems(right);
2418 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
2419 push_space -= leaf_data_end(root, left);
2421 /* make room in the right data area */
2422 data_end = leaf_data_end(root, right);
2423 memmove_extent_buffer(right,
2424 btrfs_leaf_data(right) + data_end - push_space,
2425 btrfs_leaf_data(right) + data_end,
2426 BTRFS_LEAF_DATA_SIZE(root) - data_end);
2428 /* copy from the left data area */
2429 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
2430 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2431 btrfs_leaf_data(left) + leaf_data_end(root, left),
2434 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
2435 btrfs_item_nr_offset(0),
2436 right_nritems * sizeof(struct btrfs_item));
2438 /* copy the items from left to right */
2439 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
2440 btrfs_item_nr_offset(left_nritems - push_items),
2441 push_items * sizeof(struct btrfs_item));
2443 /* update the item pointers */
2444 right_nritems += push_items;
2445 btrfs_set_header_nritems(right, right_nritems);
2446 push_space = BTRFS_LEAF_DATA_SIZE(root);
2447 for (i = 0; i < right_nritems; i++) {
2448 item = btrfs_item_nr(right, i);
2449 push_space -= btrfs_token_item_size(right, item, &token);
2450 btrfs_set_token_item_offset(right, item, push_space, &token);
2453 left_nritems -= push_items;
2454 btrfs_set_header_nritems(left, left_nritems);
2457 btrfs_mark_buffer_dirty(left);
2459 clean_tree_block(trans, root, left);
2461 btrfs_mark_buffer_dirty(right);
2463 btrfs_item_key(right, &disk_key, 0);
2464 btrfs_set_node_key(upper, &disk_key, slot + 1);
2465 btrfs_mark_buffer_dirty(upper);
2467 /* then fixup the leaf pointer in the path */
2468 if (path->slots[0] >= left_nritems) {
2469 path->slots[0] -= left_nritems;
2470 if (btrfs_header_nritems(path->nodes[0]) == 0)
2471 clean_tree_block(trans, root, path->nodes[0]);
2472 btrfs_tree_unlock(path->nodes[0]);
2473 free_extent_buffer(path->nodes[0]);
2474 path->nodes[0] = right;
2475 path->slots[1] += 1;
2477 btrfs_tree_unlock(right);
2478 free_extent_buffer(right);
2483 btrfs_tree_unlock(right);
2484 free_extent_buffer(right);
2489 * push some data in the path leaf to the right, trying to free up at
2490 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2492 * returns 1 if the push failed because the other node didn't have enough
2493 * room, 0 if everything worked out and < 0 if there were major errors.
2495 * this will push starting from min_slot to the end of the leaf. It won't
2496 * push any slot lower than min_slot
2498 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2499 *root, struct btrfs_path *path,
2500 int min_data_size, int data_size,
2501 int empty, u32 min_slot)
2503 struct extent_buffer *left = path->nodes[0];
2504 struct extent_buffer *right;
2505 struct extent_buffer *upper;
2511 if (!path->nodes[1])
2514 slot = path->slots[1];
2515 upper = path->nodes[1];
2516 if (slot >= btrfs_header_nritems(upper) - 1)
2519 btrfs_assert_tree_locked(path->nodes[1]);
2521 right = read_node_slot(root, upper, slot + 1);
2525 btrfs_tree_lock(right);
2526 btrfs_set_lock_blocking(right);
2528 free_space = btrfs_leaf_free_space(root, right);
2529 if (free_space < data_size)
2532 /* cow and double check */
2533 ret = btrfs_cow_block(trans, root, right, upper,
2538 free_space = btrfs_leaf_free_space(root, right);
2539 if (free_space < data_size)
2542 left_nritems = btrfs_header_nritems(left);
2543 if (left_nritems == 0)
2546 return __push_leaf_right(trans, root, path, min_data_size, empty,
2547 right, free_space, left_nritems, min_slot);
2549 btrfs_tree_unlock(right);
2550 free_extent_buffer(right);
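/*
 * Illustration (not part of the original source): pushing the last two
 * items of the left leaf into its right sibling.  The pushed item
 * headers become the first entries of the right leaf's item array, the
 * pushed data is copied to the tail of the right leaf's data area, and
 * the right leaf's new first key is written into the shared parent at
 * path->slots[1] + 1.  With dX denoting the payload of item X (items
 * grow from the front, data grows back from the end):
 *
 *   before:  left [ A B C D | ... dD dC dB dA ]   right [ E F | ... dF dE ]
 *   after:   left [ A B     | .......  dB dA ]   right [ C D E F | dF dE dD dC ]
 */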
2555 * push some data in the path leaf to the left, trying to free up at
2556 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2558 * max_slot can put a limit on how far into the leaf we'll push items. The
2559 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the items.
2562 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
2563 struct btrfs_root *root,
2564 struct btrfs_path *path, int data_size,
2565 int empty, struct extent_buffer *left,
2566 int free_space, u32 right_nritems,
2569 struct btrfs_disk_key disk_key;
2570 struct extent_buffer *right = path->nodes[0];
2574 struct btrfs_item *item;
2575 u32 old_left_nritems;
2579 u32 old_left_item_size;
2580 struct btrfs_map_token token;
2582 btrfs_init_map_token(&token);
2585 nr = min(right_nritems, max_slot);
2587 nr = min(right_nritems - 1, max_slot);
2589 for (i = 0; i < nr; i++) {
2590 item = btrfs_item_nr(right, i);
2592 if (!empty && push_items > 0) {
2593 if (path->slots[0] < i)
2595 if (path->slots[0] == i) {
2596 int space = btrfs_leaf_free_space(root, right);
2597 if (space + push_space * 2 > free_space)
2602 if (path->slots[0] == i)
2603 push_space += data_size;
2605 this_item_size = btrfs_item_size(right, item);
2606 if (this_item_size + sizeof(*item) + push_space > free_space)
2610 push_space += this_item_size + sizeof(*item);
2613 if (push_items == 0) {
2617 if (!empty && push_items == btrfs_header_nritems(right))
2620 /* push data from right to left */
2621 copy_extent_buffer(left, right,
2622 btrfs_item_nr_offset(btrfs_header_nritems(left)),
2623 btrfs_item_nr_offset(0),
2624 push_items * sizeof(struct btrfs_item));
2626 push_space = BTRFS_LEAF_DATA_SIZE(root) -
2627 btrfs_item_offset_nr(right, push_items - 1);
2629 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
2630 leaf_data_end(root, left) - push_space,
2631 btrfs_leaf_data(right) +
2632 btrfs_item_offset_nr(right, push_items - 1),
2634 old_left_nritems = btrfs_header_nritems(left);
2635 BUG_ON(old_left_nritems <= 0);
2637 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
2638 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
2641 item = btrfs_item_nr(left, i);
2643 ioff = btrfs_token_item_offset(left, item, &token);
2644 btrfs_set_token_item_offset(left, item,
2645 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
2648 btrfs_set_header_nritems(left, old_left_nritems + push_items);
2650 /* fixup right node */
2651 if (push_items > right_nritems) {
2652 printk(KERN_CRIT "push items %d nr %u\n", push_items,
2657 if (push_items < right_nritems) {
2658 push_space = btrfs_item_offset_nr(right, push_items - 1) -
2659 leaf_data_end(root, right);
2660 memmove_extent_buffer(right, btrfs_leaf_data(right) +
2661 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2662 btrfs_leaf_data(right) +
2663 leaf_data_end(root, right), push_space);
2665 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
2666 btrfs_item_nr_offset(push_items),
2667 (btrfs_header_nritems(right) - push_items) *
2668 sizeof(struct btrfs_item));
2670 right_nritems -= push_items;
2671 btrfs_set_header_nritems(right, right_nritems);
2672 push_space = BTRFS_LEAF_DATA_SIZE(root);
2673 for (i = 0; i < right_nritems; i++) {
2674 item = btrfs_item_nr(right, i);
2676 push_space = push_space - btrfs_token_item_size(right,
2678 btrfs_set_token_item_offset(right, item, push_space, &token);
2681 btrfs_mark_buffer_dirty(left);
2683 btrfs_mark_buffer_dirty(right);
2685 clean_tree_block(trans, root, right);
2687 btrfs_item_key(right, &disk_key, 0);
2688 fixup_low_keys(trans, root, path, &disk_key, 1);
2690 /* then fixup the leaf pointer in the path */
2691 if (path->slots[0] < push_items) {
2692 path->slots[0] += old_left_nritems;
2693 btrfs_tree_unlock(path->nodes[0]);
2694 free_extent_buffer(path->nodes[0]);
2695 path->nodes[0] = left;
2696 path->slots[1] -= 1;
2698 btrfs_tree_unlock(left);
2699 free_extent_buffer(left);
2700 path->slots[0] -= push_items;
2702 BUG_ON(path->slots[0] < 0);
2705 btrfs_tree_unlock(left);
2706 free_extent_buffer(left);
2711 * push some data in the path leaf to the left, trying to free up at
2712 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2714 * max_slot can put a limit on how far into the leaf we'll push items. The
2715 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the items.
2718 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
2719 *root, struct btrfs_path *path, int min_data_size,
2720 int data_size, int empty, u32 max_slot)
2722 struct extent_buffer *right = path->nodes[0];
2723 struct extent_buffer *left;
2729 slot = path->slots[1];
2732 if (!path->nodes[1])
2735 right_nritems = btrfs_header_nritems(right);
2736 if (right_nritems == 0)
2739 btrfs_assert_tree_locked(path->nodes[1]);
2741 left = read_node_slot(root, path->nodes[1], slot - 1);
2745 btrfs_tree_lock(left);
2746 btrfs_set_lock_blocking(left);
2748 free_space = btrfs_leaf_free_space(root, left);
2749 if (free_space < data_size) {
2754 /* cow and double check */
2755 ret = btrfs_cow_block(trans, root, left,
2756 path->nodes[1], slot - 1, &left);
2758 /* we hit -ENOSPC, but it isn't fatal here */
2764 free_space = btrfs_leaf_free_space(root, left);
2765 if (free_space < data_size) {
2770 return __push_leaf_left(trans, root, path, min_data_size,
2771 empty, left, free_space, right_nritems,
2774 btrfs_tree_unlock(left);
2775 free_extent_buffer(left);
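/*
 * Illustration (not part of the original source): the mirror image of
 * the right push above.  The first items of the right leaf are appended
 * after the left leaf's existing items, their data lands just below the
 * left leaf's existing data area, and the right leaf's new lowest key
 * is propagated into the parent nodes with fixup_low_keys().
 */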
2780 * split the path's leaf in two, making sure there is at least data_size
2781 * available for the resulting leaf level of the path.
2783 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
2784 struct btrfs_root *root,
2785 struct btrfs_path *path,
2786 struct extent_buffer *l,
2787 struct extent_buffer *right,
2788 int slot, int mid, int nritems)
2793 struct btrfs_disk_key disk_key;
2794 struct btrfs_map_token token;
2796 btrfs_init_map_token(&token);
2798 nritems = nritems - mid;
2799 btrfs_set_header_nritems(right, nritems);
2800 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
2802 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
2803 btrfs_item_nr_offset(mid),
2804 nritems * sizeof(struct btrfs_item));
2806 copy_extent_buffer(right, l,
2807 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
2808 data_copy_size, btrfs_leaf_data(l) +
2809 leaf_data_end(root, l), data_copy_size);
2811 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
2812 btrfs_item_end_nr(l, mid);
2814 for (i = 0; i < nritems; i++) {
2815 struct btrfs_item *item = btrfs_item_nr(right, i);
2818 ioff = btrfs_token_item_offset(right, item, &token);
2819 btrfs_set_token_item_offset(right, item,
2820 ioff + rt_data_off, &token);
2823 btrfs_set_header_nritems(l, mid);
2824 btrfs_item_key(right, &disk_key, 0);
2825 insert_ptr(trans, root, path, &disk_key, right->start,
2826 path->slots[1] + 1, 1);
2828 btrfs_mark_buffer_dirty(right);
2829 btrfs_mark_buffer_dirty(l);
2830 BUG_ON(path->slots[0] != slot);
2833 btrfs_tree_unlock(path->nodes[0]);
2834 free_extent_buffer(path->nodes[0]);
2835 path->nodes[0] = right;
2836 path->slots[0] -= mid;
2837 path->slots[1] += 1;
2839 btrfs_tree_unlock(right);
2840 free_extent_buffer(right);
2843 BUG_ON(path->slots[0] < 0);
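/*
 * Illustration (not part of the original source): copy_for_split() with
 * nritems = 6 and mid = 3 leaves items [0..2] in the original leaf and
 * moves items [3..5] plus their data into the new right leaf.  The key
 * of old item 3 becomes the key the parent uses for the new leaf
 * (inserted at path->slots[1] + 1), and the copied item offsets are
 * shifted by rt_data_off so the right leaf's data once again ends at
 * BTRFS_LEAF_DATA_SIZE(root).
 */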
2847 * double splits happen when we need to insert a big item in the middle
2848 * of a leaf. A double split can leave us with 3 mostly empty leaves:
2849 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
2852 * We avoid this by trying to push the items on either side of our target
2853 * into the adjacent leaves. If all goes well we can avoid the double split completely.
2856 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
2857 struct btrfs_root *root,
2858 struct btrfs_path *path,
2866 slot = path->slots[0];
2869 * try to push all the items after our slot into the
2872 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
2879 nritems = btrfs_header_nritems(path->nodes[0]);
2881 * our goal is to get our slot at the start or end of a leaf. If
2882 * we've done so we're done
2884 if (path->slots[0] == 0 || path->slots[0] == nritems)
2887 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
2890 /* try to push all the items before our slot into the next leaf */
2891 slot = path->slots[0];
2892 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
2905 * split the path's leaf in two, making sure there is at least data_size
2906 * available for the resulting leaf level of the path.
2908 * returns 0 if all went well and < 0 on failure.
2910 static noinline int split_leaf(struct btrfs_trans_handle *trans,
2911 struct btrfs_root *root,
2912 struct btrfs_key *ins_key,
2913 struct btrfs_path *path, int data_size,
2916 struct btrfs_disk_key disk_key;
2917 struct extent_buffer *l;
2921 struct extent_buffer *right;
2925 int num_doubles = 0;
2926 int tried_avoid_double = 0;
2929 slot = path->slots[0];
2930 if (extend && data_size + btrfs_item_size_nr(l, slot) +
2931 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
2934 /* first try to make some room by pushing left and right */
2936 wret = push_leaf_right(trans, root, path, data_size,
2941 wret = push_leaf_left(trans, root, path, data_size,
2942 data_size, 0, (u32)-1);
2948 /* did the pushes work? */
2949 if (btrfs_leaf_free_space(root, l) >= data_size)
2953 if (!path->nodes[1]) {
2954 ret = insert_new_root(trans, root, path, 1);
2961 slot = path->slots[0];
2962 nritems = btrfs_header_nritems(l);
2963 mid = (nritems + 1) / 2;
2967 leaf_space_used(l, mid, nritems - mid) + data_size >
2968 BTRFS_LEAF_DATA_SIZE(root)) {
2969 if (slot >= nritems) {
2973 if (mid != nritems &&
2974 leaf_space_used(l, mid, nritems - mid) +
2975 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2976 if (data_size && !tried_avoid_double)
2977 goto push_for_double;
2983 if (leaf_space_used(l, 0, mid) + data_size >
2984 BTRFS_LEAF_DATA_SIZE(root)) {
2985 if (!extend && data_size && slot == 0) {
2987 } else if ((extend || !data_size) && slot == 0) {
2991 if (mid != nritems &&
2992 leaf_space_used(l, mid, nritems - mid) +
2993 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2994 if (data_size && !tried_avoid_double)
2995 goto push_for_double;
3003 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3005 btrfs_item_key(l, &disk_key, mid);
3007 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
3008 root->root_key.objectid,
3009 &disk_key, 0, l->start, 0, 0);
3011 return PTR_ERR(right);
3013 root_add_used(root, root->leafsize);
3015 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
3016 btrfs_set_header_bytenr(right, right->start);
3017 btrfs_set_header_generation(right, trans->transid);
3018 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
3019 btrfs_set_header_owner(right, root->root_key.objectid);
3020 btrfs_set_header_level(right, 0);
3021 write_extent_buffer(right, root->fs_info->fsid,
3022 (unsigned long)btrfs_header_fsid(right),
3025 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
3026 (unsigned long)btrfs_header_chunk_tree_uuid(right),
3031 btrfs_set_header_nritems(right, 0);
3032 insert_ptr(trans, root, path, &disk_key, right->start,
3033 path->slots[1] + 1, 1);
3034 btrfs_tree_unlock(path->nodes[0]);
3035 free_extent_buffer(path->nodes[0]);
3036 path->nodes[0] = right;
3038 path->slots[1] += 1;
3040 btrfs_set_header_nritems(right, 0);
3041 insert_ptr(trans, root, path, &disk_key, right->start,
3043 btrfs_tree_unlock(path->nodes[0]);
3044 free_extent_buffer(path->nodes[0]);
3045 path->nodes[0] = right;
3047 if (path->slots[1] == 0)
3048 fixup_low_keys(trans, root, path,
3051 btrfs_mark_buffer_dirty(right);
3055 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
3058 BUG_ON(num_doubles != 0);
3066 push_for_double_split(trans, root, path, data_size);
3067 tried_avoid_double = 1;
3068 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3073 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3074 struct btrfs_root *root,
3075 struct btrfs_path *path, int ins_len)
3077 struct btrfs_key key;
3078 struct extent_buffer *leaf;
3079 struct btrfs_file_extent_item *fi;
3084 leaf = path->nodes[0];
3085 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3087 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3088 key.type != BTRFS_EXTENT_CSUM_KEY);
3090 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
3093 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3094 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3095 fi = btrfs_item_ptr(leaf, path->slots[0],
3096 struct btrfs_file_extent_item);
3097 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3099 btrfs_release_path(path);
3101 path->keep_locks = 1;
3102 path->search_for_split = 1;
3103 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3104 path->search_for_split = 0;
3109 leaf = path->nodes[0];
3110 /* if our item isn't there or got smaller, return now */
3111 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3114 /* the leaf has changed, it now has room. return now */
3115 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
3118 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3119 fi = btrfs_item_ptr(leaf, path->slots[0],
3120 struct btrfs_file_extent_item);
3121 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3125 btrfs_set_path_blocking(path);
3126 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3130 path->keep_locks = 0;
3131 btrfs_unlock_up_safe(path, 1);
3134 path->keep_locks = 0;
3138 static noinline int split_item(struct btrfs_trans_handle *trans,
3139 struct btrfs_root *root,
3140 struct btrfs_path *path,
3141 struct btrfs_key *new_key,
3142 unsigned long split_offset)
3144 struct extent_buffer *leaf;
3145 struct btrfs_item *item;
3146 struct btrfs_item *new_item;
3152 struct btrfs_disk_key disk_key;
3154 leaf = path->nodes[0];
3155 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
3157 btrfs_set_path_blocking(path);
3159 item = btrfs_item_nr(leaf, path->slots[0]);
3160 orig_offset = btrfs_item_offset(leaf, item);
3161 item_size = btrfs_item_size(leaf, item);
3163 buf = kmalloc(item_size, GFP_NOFS);
3167 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3168 path->slots[0]), item_size);
3170 slot = path->slots[0] + 1;
3171 nritems = btrfs_header_nritems(leaf);
3172 if (slot != nritems) {
3173 /* shift the items */
3174 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3175 btrfs_item_nr_offset(slot),
3176 (nritems - slot) * sizeof(struct btrfs_item));
3179 btrfs_cpu_key_to_disk(&disk_key, new_key);
3180 btrfs_set_item_key(leaf, &disk_key, slot);
3182 new_item = btrfs_item_nr(leaf, slot);
3184 btrfs_set_item_offset(leaf, new_item, orig_offset);
3185 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3187 btrfs_set_item_offset(leaf, item,
3188 orig_offset + item_size - split_offset);
3189 btrfs_set_item_size(leaf, item, split_offset);
3191 btrfs_set_header_nritems(leaf, nritems + 1);
3193 /* write the data for the start of the original item */
3194 write_extent_buffer(leaf, buf,
3195 btrfs_item_ptr_offset(leaf, path->slots[0]),
3198 /* write the data for the new item */
3199 write_extent_buffer(leaf, buf + split_offset,
3200 btrfs_item_ptr_offset(leaf, slot),
3201 item_size - split_offset);
3202 btrfs_mark_buffer_dirty(leaf);
3204 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
3210 * This function splits a single item into two items,
3211 * giving 'new_key' to the new item and splitting the
3212 * old one at split_offset (from the start of the item).
3214 * The path may be released by this operation. After
3215 * the split, the path is pointing to the old item. The
3216 * new item is going to be in the same node as the old one.
3218 * Note, the item being split must be small enough to live alone on
3219 * a tree block with room for one extra struct btrfs_item
3221 * This allows us to split the item in place, keeping a lock on the
3222 * leaf the entire time.
3224 int btrfs_split_item(struct btrfs_trans_handle *trans,
3225 struct btrfs_root *root,
3226 struct btrfs_path *path,
3227 struct btrfs_key *new_key,
3228 unsigned long split_offset)
3231 ret = setup_leaf_for_split(trans, root, path,
3232 sizeof(struct btrfs_item));
3236 ret = split_item(trans, root, path, new_key, split_offset);
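/*
 * Usage sketch (illustrative only; "demo_key" is a made-up name and
 * error handling is elided): splitting the item the path points at so
 * that everything from byte 16 onward becomes a new item with its own
 * key.  Note that setup_leaf_for_split() above only accepts extent
 * data and csum items.
 *
 *	struct btrfs_key demo_key;
 *
 *	btrfs_item_key_to_cpu(path->nodes[0], &demo_key, path->slots[0]);
 *	demo_key.offset += 16;	// made-up key for the tail of the item
 *	ret = btrfs_split_item(trans, root, path, &demo_key, 16);
 */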
3241 * This function duplicates an item, giving 'new_key' to the new item.
3242 * It guarantees both items live in the same tree leaf and the new item
3243 * is contiguous with the original item.
3245 * This allows us to split a file extent in place, keeping a lock on the
3246 * leaf the entire time.
3248 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
3249 struct btrfs_root *root,
3250 struct btrfs_path *path,
3251 struct btrfs_key *new_key)
3253 struct extent_buffer *leaf;
3257 leaf = path->nodes[0];
3258 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3259 ret = setup_leaf_for_split(trans, root, path,
3260 item_size + sizeof(struct btrfs_item));
3265 setup_items_for_insert(trans, root, path, new_key, &item_size,
3266 item_size, item_size +
3267 sizeof(struct btrfs_item), 1);
3268 leaf = path->nodes[0];
3269 memcpy_extent_buffer(leaf,
3270 btrfs_item_ptr_offset(leaf, path->slots[0]),
3271 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
3277 * make the item pointed to by the path smaller. new_size indicates
3278 * how small to make it, and from_end tells us if we just chop bytes
3279 * off the end of the item or if we shift the item to chop bytes off the front.
3282 void btrfs_truncate_item(struct btrfs_trans_handle *trans,
3283 struct btrfs_root *root,
3284 struct btrfs_path *path,
3285 u32 new_size, int from_end)
3288 struct extent_buffer *leaf;
3289 struct btrfs_item *item;
3291 unsigned int data_end;
3292 unsigned int old_data_start;
3293 unsigned int old_size;
3294 unsigned int size_diff;
3296 struct btrfs_map_token token;
3298 btrfs_init_map_token(&token);
3300 leaf = path->nodes[0];
3301 slot = path->slots[0];
3303 old_size = btrfs_item_size_nr(leaf, slot);
3304 if (old_size == new_size)
3307 nritems = btrfs_header_nritems(leaf);
3308 data_end = leaf_data_end(root, leaf);
3310 old_data_start = btrfs_item_offset_nr(leaf, slot);
3312 size_diff = old_size - new_size;
3315 BUG_ON(slot >= nritems);
3318 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3320 /* first correct the data pointers */
3321 for (i = slot; i < nritems; i++) {
3323 item = btrfs_item_nr(leaf, i);
3325 ioff = btrfs_token_item_offset(leaf, item, &token);
3326 btrfs_set_token_item_offset(leaf, item,
3327 ioff + size_diff, &token);
3330 /* shift the data */
3332 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3333 data_end + size_diff, btrfs_leaf_data(leaf) +
3334 data_end, old_data_start + new_size - data_end);
3336 struct btrfs_disk_key disk_key;
3339 btrfs_item_key(leaf, &disk_key, slot);
3341 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3343 struct btrfs_file_extent_item *fi;
3345 fi = btrfs_item_ptr(leaf, slot,
3346 struct btrfs_file_extent_item);
3347 fi = (struct btrfs_file_extent_item *)(
3348 (unsigned long)fi - size_diff);
3350 if (btrfs_file_extent_type(leaf, fi) ==
3351 BTRFS_FILE_EXTENT_INLINE) {
3352 ptr = btrfs_item_ptr_offset(leaf, slot);
3353 memmove_extent_buffer(leaf, ptr,
3355 offsetof(struct btrfs_file_extent_item,
3360 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3361 data_end + size_diff, btrfs_leaf_data(leaf) +
3362 data_end, old_data_start - data_end);
3364 offset = btrfs_disk_key_offset(&disk_key);
3365 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3366 btrfs_set_item_key(leaf, &disk_key, slot);
3368 fixup_low_keys(trans, root, path, &disk_key, 1);
3371 item = btrfs_item_nr(leaf, slot);
3372 btrfs_set_item_size(leaf, item, new_size);
3373 btrfs_mark_buffer_dirty(leaf);
3375 if (btrfs_leaf_free_space(root, leaf) < 0) {
3376 btrfs_print_leaf(root, leaf);
3382 * make the item pointed to by the path bigger, data_size is the added size.
3384 void btrfs_extend_item(struct btrfs_trans_handle *trans,
3385 struct btrfs_root *root, struct btrfs_path *path,
3389 struct extent_buffer *leaf;
3390 struct btrfs_item *item;
3392 unsigned int data_end;
3393 unsigned int old_data;
3394 unsigned int old_size;
3396 struct btrfs_map_token token;
3398 btrfs_init_map_token(&token);
3400 leaf = path->nodes[0];
3402 nritems = btrfs_header_nritems(leaf);
3403 data_end = leaf_data_end(root, leaf);
3405 if (btrfs_leaf_free_space(root, leaf) < data_size) {
3406 btrfs_print_leaf(root, leaf);
3409 slot = path->slots[0];
3410 old_data = btrfs_item_end_nr(leaf, slot);
3413 if (slot >= nritems) {
3414 btrfs_print_leaf(root, leaf);
3415 printk(KERN_CRIT "slot %d too large, nritems %d\n",
3421 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3423 /* first correct the data pointers */
3424 for (i = slot; i < nritems; i++) {
3426 item = btrfs_item_nr(leaf, i);
3428 ioff = btrfs_token_item_offset(leaf, item, &token);
3429 btrfs_set_token_item_offset(leaf, item,
3430 ioff - data_size, &token);
3433 /* shift the data */
3434 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3435 data_end - data_size, btrfs_leaf_data(leaf) +
3436 data_end, old_data - data_end);
3438 data_end = old_data;
3439 old_size = btrfs_item_size_nr(leaf, slot);
3440 item = btrfs_item_nr(leaf, slot);
3441 btrfs_set_item_size(leaf, item, old_size + data_size);
3442 btrfs_mark_buffer_dirty(leaf);
3444 if (btrfs_leaf_free_space(root, leaf) < 0) {
3445 btrfs_print_leaf(root, leaf);
3451 * Given a key and some data, insert items into the tree.
3452 * This does all the path init required, making room in the tree if needed.
3453 * Returns the number of keys that were inserted.
3455 int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3456 struct btrfs_root *root,
3457 struct btrfs_path *path,
3458 struct btrfs_key *cpu_key, u32 *data_size,
3461 struct extent_buffer *leaf;
3462 struct btrfs_item *item;
3469 unsigned int data_end;
3470 struct btrfs_disk_key disk_key;
3471 struct btrfs_key found_key;
3472 struct btrfs_map_token token;
3474 btrfs_init_map_token(&token);
3476 for (i = 0; i < nr; i++) {
3477 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
3478 BTRFS_LEAF_DATA_SIZE(root)) {
3482 total_data += data_size[i];
3483 total_size += data_size[i] + sizeof(struct btrfs_item);
3487 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3493 leaf = path->nodes[0];
3495 nritems = btrfs_header_nritems(leaf);
3496 data_end = leaf_data_end(root, leaf);
3498 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3499 for (i = nr - 1; i >= 0; i--) {
3500 total_data -= data_size[i];
3501 total_size -= data_size[i] + sizeof(struct btrfs_item);
3502 if (total_size < btrfs_leaf_free_space(root, leaf))
3508 slot = path->slots[0];
3511 if (slot != nritems) {
3512 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3514 item = btrfs_item_nr(leaf, slot);
3515 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3517 /* figure out how many keys we can insert in here */
3518 total_data = data_size[0];
3519 for (i = 1; i < nr; i++) {
3520 if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
3522 total_data += data_size[i];
3526 if (old_data < data_end) {
3527 btrfs_print_leaf(root, leaf);
3528 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3529 slot, old_data, data_end);
3533 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3535 /* first correct the data pointers */
3536 for (i = slot; i < nritems; i++) {
3539 item = btrfs_item_nr(leaf, i);
3540 ioff = btrfs_token_item_offset(leaf, item, &token);
3541 btrfs_set_token_item_offset(leaf, item,
3542 ioff - total_data, &token);
3544 /* shift the items */
3545 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3546 btrfs_item_nr_offset(slot),
3547 (nritems - slot) * sizeof(struct btrfs_item));
3549 /* shift the data */
3550 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3551 data_end - total_data, btrfs_leaf_data(leaf) +
3552 data_end, old_data - data_end);
3553 data_end = old_data;
3556 * this sucks but it has to be done: if we are inserting at
3557 * the end of the leaf, only insert 1 of the items, since we
3558 * have no way of knowing what's on the next leaf and we'd have
3559 * to drop our current locks to figure it out
3564 /* setup the item for the new data */
3565 for (i = 0; i < nr; i++) {
3566 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3567 btrfs_set_item_key(leaf, &disk_key, slot + i);
3568 item = btrfs_item_nr(leaf, slot + i);
3569 btrfs_set_token_item_offset(leaf, item,
3570 data_end - data_size[i], &token);
3571 data_end -= data_size[i];
3572 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
3574 btrfs_set_header_nritems(leaf, nritems + nr);
3575 btrfs_mark_buffer_dirty(leaf);
3579 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3580 fixup_low_keys(trans, root, path, &disk_key, 1);
3583 if (btrfs_leaf_free_space(root, leaf) < 0) {
3584 btrfs_print_leaf(root, leaf);
3594 * this is a helper for btrfs_insert_empty_items, the main goal here is
3595 * to save stack depth by doing the bulk of the work in a function
3596 * that doesn't call btrfs_search_slot
3598 void setup_items_for_insert(struct btrfs_trans_handle *trans,
3599 struct btrfs_root *root, struct btrfs_path *path,
3600 struct btrfs_key *cpu_key, u32 *data_size,
3601 u32 total_data, u32 total_size, int nr)
3603 struct btrfs_item *item;
3606 unsigned int data_end;
3607 struct btrfs_disk_key disk_key;
3608 struct extent_buffer *leaf;
3610 struct btrfs_map_token token;
3612 btrfs_init_map_token(&token);
3614 leaf = path->nodes[0];
3615 slot = path->slots[0];
3617 nritems = btrfs_header_nritems(leaf);
3618 data_end = leaf_data_end(root, leaf);
3620 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3621 btrfs_print_leaf(root, leaf);
3622 printk(KERN_CRIT "not enough freespace need %u have %d\n",
3623 total_size, btrfs_leaf_free_space(root, leaf));
3627 if (slot != nritems) {
3628 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3630 if (old_data < data_end) {
3631 btrfs_print_leaf(root, leaf);
3632 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3633 slot, old_data, data_end);
3637 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3639 /* first correct the data pointers */
3640 for (i = slot; i < nritems; i++) {
3643 item = btrfs_item_nr(leaf, i);
3644 ioff = btrfs_token_item_offset(leaf, item, &token);
3645 btrfs_set_token_item_offset(leaf, item,
3646 ioff - total_data, &token);
3648 /* shift the items */
3649 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3650 btrfs_item_nr_offset(slot),
3651 (nritems - slot) * sizeof(struct btrfs_item));
3653 /* shift the data */
3654 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3655 data_end - total_data, btrfs_leaf_data(leaf) +
3656 data_end, old_data - data_end);
3657 data_end = old_data;
3660 /* setup the item for the new data */
3661 for (i = 0; i < nr; i++) {
3662 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3663 btrfs_set_item_key(leaf, &disk_key, slot + i);
3664 item = btrfs_item_nr(leaf, slot + i);
3665 btrfs_set_token_item_offset(leaf, item,
3666 data_end - data_size[i], &token);
3667 data_end -= data_size[i];
3668 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
3671 btrfs_set_header_nritems(leaf, nritems + nr);
3674 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3675 fixup_low_keys(trans, root, path, &disk_key, 1);
3677 btrfs_unlock_up_safe(path, 1);
3678 btrfs_mark_buffer_dirty(leaf);
3680 if (btrfs_leaf_free_space(root, leaf) < 0) {
3681 btrfs_print_leaf(root, leaf);
3687 * Given a key and some data, insert items into the tree.
3688 * This does all the path init required, making room in the tree if needed.
3690 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3691 struct btrfs_root *root,
3692 struct btrfs_path *path,
3693 struct btrfs_key *cpu_key, u32 *data_size,
3702 for (i = 0; i < nr; i++)
3703 total_data += data_size[i];
3705 total_size = total_data + (nr * sizeof(struct btrfs_item));
3706 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3712 slot = path->slots[0];
3715 setup_items_for_insert(trans, root, path, cpu_key, data_size,
3716 total_data, total_size, nr);
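/*
 * Usage sketch (illustrative only; "demo_key", "demo_size" and "data"
 * are placeholders): reserving room for a single item and filling it in
 * place.  This is exactly the pattern btrfs_insert_item() below wraps.
 *
 *	ret = btrfs_insert_empty_item(trans, root, path, &demo_key, demo_size);
 *	if (ret == 0) {
 *		leaf = path->nodes[0];
 *		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
 *		write_extent_buffer(leaf, data, ptr, demo_size);
 *		btrfs_mark_buffer_dirty(leaf);
 *	}
 *	btrfs_release_path(path);
 */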
3721 * Given a key and some data, insert an item into the tree.
3722 * This does all the path init required, making room in the tree if needed.
3724 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
3725 *root, struct btrfs_key *cpu_key, void *data, u32
3729 struct btrfs_path *path;
3730 struct extent_buffer *leaf;
3733 path = btrfs_alloc_path();
3736 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
3738 leaf = path->nodes[0];
3739 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3740 write_extent_buffer(leaf, data, ptr, data_size);
3741 btrfs_mark_buffer_dirty(leaf);
3743 btrfs_free_path(path);
3748 * delete the pointer from a given node.
3750 * the tree should have been previously balanced so the deletion does not empty a node.
3753 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3754 struct btrfs_path *path, int level, int slot)
3756 struct extent_buffer *parent = path->nodes[level];
3759 nritems = btrfs_header_nritems(parent);
3760 if (slot != nritems - 1) {
3761 memmove_extent_buffer(parent,
3762 btrfs_node_key_ptr_offset(slot),
3763 btrfs_node_key_ptr_offset(slot + 1),
3764 sizeof(struct btrfs_key_ptr) *
3765 (nritems - slot - 1));
3768 btrfs_set_header_nritems(parent, nritems);
3769 if (nritems == 0 && parent == root->node) {
3770 BUG_ON(btrfs_header_level(root->node) != 1);
3771 /* just turn the root into a leaf and break */
3772 btrfs_set_header_level(root->node, 0);
3773 } else if (slot == 0) {
3774 struct btrfs_disk_key disk_key;
3776 btrfs_node_key(parent, &disk_key, 0);
3777 fixup_low_keys(trans, root, path, &disk_key, level + 1);
3779 btrfs_mark_buffer_dirty(parent);
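/*
 * Illustration (not part of the original source): calling del_ptr() on
 * a node holding key pointers [k0 k1 k2 k3] with slot = 1 shifts k2 and
 * k3 down one position, leaving [k0 k2 k3].  Deleting slot 0 instead
 * would make k1 the node's new lowest key, which is why fixup_low_keys()
 * is used to propagate it into the parents.
 */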
3783 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
3786 * This deletes the pointer in path->nodes[1] and frees the leaf
3787 * block extent.
3789 * The path must have already been set up for deleting the leaf, including
3790 * all the proper balancing. path->nodes[1] must be locked.
3792 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
3793 struct btrfs_root *root,
3794 struct btrfs_path *path,
3795 struct extent_buffer *leaf)
3797 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
3798 del_ptr(trans, root, path, 1, path->slots[1]);
3801 * btrfs_free_extent is expensive, we want to make sure we
3802 * aren't holding any locks when we call it
3804 btrfs_unlock_up_safe(path, 0);
3806 root_sub_used(root, leaf->len);
3808 extent_buffer_get(leaf);
3809 btrfs_free_tree_block(trans, root, leaf, 0, 1, 0);
3810 free_extent_buffer_stale(leaf);
3813 * delete the item at the leaf level in path. If that empties
3814 * the leaf, remove it from the tree
3816 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3817 struct btrfs_path *path, int slot, int nr)
3819 struct extent_buffer *leaf;
3820 struct btrfs_item *item;
3827 struct btrfs_map_token token;
3829 btrfs_init_map_token(&token);
3831 leaf = path->nodes[0];
3832 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
3834 for (i = 0; i < nr; i++)
3835 dsize += btrfs_item_size_nr(leaf, slot + i);
3837 nritems = btrfs_header_nritems(leaf);
3839 if (slot + nr != nritems) {
3840 int data_end = leaf_data_end(root, leaf);
3842 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3844 btrfs_leaf_data(leaf) + data_end,
3845 last_off - data_end);
3847 for (i = slot + nr; i < nritems; i++) {
3850 item = btrfs_item_nr(leaf, i);
3851 ioff = btrfs_token_item_offset(leaf, item, &token);
3852 btrfs_set_token_item_offset(leaf, item,
3853 ioff + dsize, &token);
3856 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
3857 btrfs_item_nr_offset(slot + nr),
3858 sizeof(struct btrfs_item) *
3859 (nritems - slot - nr));
3861 btrfs_set_header_nritems(leaf, nritems - nr);
3864 /* delete the leaf if we've emptied it */
3866 if (leaf == root->node) {
3867 btrfs_set_header_level(leaf, 0);
3869 btrfs_set_path_blocking(path);
3870 clean_tree_block(trans, root, leaf);
3871 btrfs_del_leaf(trans, root, path, leaf);
3874 int used = leaf_space_used(leaf, 0, nritems);
3876 struct btrfs_disk_key disk_key;
3878 btrfs_item_key(leaf, &disk_key, 0);
3879 fixup_low_keys(trans, root, path, &disk_key, 1);
3882 /* delete the leaf if it is mostly empty */
3883 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
3884 /* push_leaf_left fixes the path.
3885 * make sure the path still points to our leaf
3886 * for possible call to del_ptr below
3888 slot = path->slots[1];
3889 extent_buffer_get(leaf);
3891 btrfs_set_path_blocking(path);
3892 wret = push_leaf_left(trans, root, path, 1, 1,
3894 if (wret < 0 && wret != -ENOSPC)
3897 if (path->nodes[0] == leaf &&
3898 btrfs_header_nritems(leaf)) {
3899 wret = push_leaf_right(trans, root, path, 1,
3901 if (wret < 0 && wret != -ENOSPC)
3905 if (btrfs_header_nritems(leaf) == 0) {
3906 path->slots[1] = slot;
3907 btrfs_del_leaf(trans, root, path, leaf);
3908 free_extent_buffer(leaf);
3911 /* if we're still in the path, make sure
3912 * we're dirty. Otherwise, one of the
3913 * push_leaf functions must have already
3914 * dirtied this buffer
3916 if (path->nodes[0] == leaf)
3917 btrfs_mark_buffer_dirty(leaf);
3918 free_extent_buffer(leaf);
3921 btrfs_mark_buffer_dirty(leaf);
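/*
 * Usage sketch (illustrative only; error handling is elided): deleting
 * the single item a search just located.  An ins_len of -1 tells
 * btrfs_search_slot() to pre-balance the path for a deletion.
 *
 *	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 *	if (ret == 0)
 *		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
 *	btrfs_release_path(path);
 */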
3928 * search the tree again to find a leaf with lesser keys
3929 * returns 0 if it found something or 1 if there are no lesser leaves.
3930 * returns < 0 on io errors.
3932 * This may release the path, and so you may lose any locks held at the time you call it.
3935 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
3937 struct btrfs_key key;
3938 struct btrfs_disk_key found_key;
3941 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
3945 else if (key.type > 0)
3947 else if (key.objectid > 0)
3952 btrfs_release_path(path);
3953 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3956 btrfs_item_key(path->nodes[0], &found_key, 0);
3957 ret = comp_keys(&found_key, &key);
3964 * A helper function to walk down the tree starting at min_key, and looking
3965 * for nodes or leaves that are either in cache or have a minimum
3966 * transaction id. This is used by the btree defrag code, and tree logging
3968 * This does not cow, but it does stuff the starting key it finds back
3969 * into min_key, so you can call btrfs_search_slot with cow=1 on the
3970 * key and get a writable path.
3972 * This does lock as it descends, and path->keep_locks should be set
3973 * to 1 by the caller.
3975 * This honors path->lowest_level to prevent descent past a given level of the tree.
3978 * min_trans indicates the oldest transaction that you are interested
3979 * in walking through. Any nodes or leaves older than min_trans are
3980 * skipped over (without reading them).
3982 * returns zero if something useful was found, < 0 on error and 1 if there
3983 * was nothing in the tree that matched the search criteria.
3985 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
3986 struct btrfs_key *max_key,
3987 struct btrfs_path *path, int cache_only,
3990 struct extent_buffer *cur;
3991 struct btrfs_key found_key;
3998 WARN_ON(!path->keep_locks);
4000 cur = btrfs_read_lock_root_node(root);
4001 level = btrfs_header_level(cur);
4002 WARN_ON(path->nodes[level]);
4003 path->nodes[level] = cur;
4004 path->locks[level] = BTRFS_READ_LOCK;
4006 if (btrfs_header_generation(cur) < min_trans) {
4011 nritems = btrfs_header_nritems(cur);
4012 level = btrfs_header_level(cur);
4013 sret = bin_search(cur, min_key, level, &slot);
4015 /* at the lowest level, we're done, setup the path and exit */
4016 if (level == path->lowest_level) {
4017 if (slot >= nritems)
4020 path->slots[level] = slot;
4021 btrfs_item_key_to_cpu(cur, &found_key, slot);
4024 if (sret && slot > 0)
4027 * check this node pointer against the cache_only and
4028 * min_trans parameters. If it isn't in cache or is too
4029 * old, skip to the next one.
4031 while (slot < nritems) {
4034 struct extent_buffer *tmp;
4035 struct btrfs_disk_key disk_key;
4037 blockptr = btrfs_node_blockptr(cur, slot);
4038 gen = btrfs_node_ptr_generation(cur, slot);
4039 if (gen < min_trans) {
4047 btrfs_node_key(cur, &disk_key, slot);
4048 if (comp_keys(&disk_key, max_key) >= 0) {
4054 tmp = btrfs_find_tree_block(root, blockptr,
4055 btrfs_level_size(root, level - 1));
4057 if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
4058 free_extent_buffer(tmp);
4062 free_extent_buffer(tmp);
4067 * we didn't find a candidate key in this node, walk forward
4068 * and find another one
4070 if (slot >= nritems) {
4071 path->slots[level] = slot;
4072 btrfs_set_path_blocking(path);
4073 sret = btrfs_find_next_key(root, path, min_key, level,
4074 cache_only, min_trans);
4076 btrfs_release_path(path);
4082 /* save our key for returning back */
4083 btrfs_node_key_to_cpu(cur, &found_key, slot);
4084 path->slots[level] = slot;
4085 if (level == path->lowest_level) {
4087 unlock_up(path, level, 1, 0, NULL);
4090 btrfs_set_path_blocking(path);
4091 cur = read_node_slot(root, cur, slot);
4092 BUG_ON(!cur); /* -ENOMEM */
4094 btrfs_tree_read_lock(cur);
4096 path->locks[level - 1] = BTRFS_READ_LOCK;
4097 path->nodes[level - 1] = cur;
4098 unlock_up(path, level, 1, 0, NULL);
4099 btrfs_clear_path_blocking(path, NULL, 0);
4103 memcpy(min_key, &found_key, sizeof(found_key));
4104 btrfs_set_path_blocking(path);
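/*
 * Usage sketch (illustrative only; "process_item" is a made-up helper
 * and the min/max keys are assumed to be initialized by the caller):
 * finding the first key at or after min_key that lives in a block newer
 * than min_trans, without cowing anything.
 *
 *	path->keep_locks = 1;
 *	ret = btrfs_search_forward(root, &min_key, &max_key, path, 0, min_trans);
 *	if (ret == 0) {
 *		// min_key now holds the key that was found
 *		process_item(path, &min_key);
 *	}
 *	btrfs_release_path(path);
 *	path->keep_locks = 0;
 */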
4109 * this is similar to btrfs_next_leaf, but does not try to preserve
4110 * and fixup the path. It looks for and returns the next key in the
4111 * tree based on the current path and the cache_only and min_trans parameters.
4114 * 0 is returned if another key is found, < 0 if there are any errors
4115 * and 1 is returned if there are no higher keys in the tree
4117 * path->keep_locks should be set to 1 on the search made before
4118 * calling this function.
4120 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4121 struct btrfs_key *key, int level,
4122 int cache_only, u64 min_trans)
4125 struct extent_buffer *c;
4127 WARN_ON(!path->keep_locks);
4128 while (level < BTRFS_MAX_LEVEL) {
4129 if (!path->nodes[level])
4132 slot = path->slots[level] + 1;
4133 c = path->nodes[level];
4135 if (slot >= btrfs_header_nritems(c)) {
4138 struct btrfs_key cur_key;
4139 if (level + 1 >= BTRFS_MAX_LEVEL ||
4140 !path->nodes[level + 1])
4143 if (path->locks[level + 1]) {
4148 slot = btrfs_header_nritems(c) - 1;
4150 btrfs_item_key_to_cpu(c, &cur_key, slot);
4152 btrfs_node_key_to_cpu(c, &cur_key, slot);
4154 orig_lowest = path->lowest_level;
4155 btrfs_release_path(path);
4156 path->lowest_level = level;
4157 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4159 path->lowest_level = orig_lowest;
4163 c = path->nodes[level];
4164 slot = path->slots[level];
4171 btrfs_item_key_to_cpu(c, key, slot);
4173 u64 blockptr = btrfs_node_blockptr(c, slot);
4174 u64 gen = btrfs_node_ptr_generation(c, slot);
4177 struct extent_buffer *cur;
4178 cur = btrfs_find_tree_block(root, blockptr,
4179 btrfs_level_size(root, level - 1));
4181 btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
4184 free_extent_buffer(cur);
4187 free_extent_buffer(cur);
4189 if (gen < min_trans) {
4193 btrfs_node_key_to_cpu(c, key, slot);
4201 * search the tree again to find a leaf with greater keys
4202 * returns 0 if it found something or 1 if there are no greater leaves.
4203 * returns < 0 on io errors.
4205 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
4209 struct extent_buffer *c;
4210 struct extent_buffer *next;
4211 struct btrfs_key key;
4214 int old_spinning = path->leave_spinning;
4215 int next_rw_lock = 0;
4217 nritems = btrfs_header_nritems(path->nodes[0]);
4221 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4226 btrfs_release_path(path);
4228 path->keep_locks = 1;
4229 path->leave_spinning = 1;
4231 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4232 path->keep_locks = 0;
4237 nritems = btrfs_header_nritems(path->nodes[0]);
4239 * by releasing the path above we dropped all our locks. A balance
4240 * could have added more items next to the key that used to be
4241 * at the very end of the block. So, check again here and
4242 * advance the path if there are now more items available.
4244 if (nritems > 0 && path->slots[0] < nritems - 1) {
4251 while (level < BTRFS_MAX_LEVEL) {
4252 if (!path->nodes[level]) {
4257 slot = path->slots[level] + 1;
4258 c = path->nodes[level];
4259 if (slot >= btrfs_header_nritems(c)) {
4261 if (level == BTRFS_MAX_LEVEL) {
4269 btrfs_tree_unlock_rw(next, next_rw_lock);
4270 free_extent_buffer(next);
4274 next_rw_lock = path->locks[level];
4275 ret = read_block_for_search(NULL, root, path, &next, level,
4281 btrfs_release_path(path);
4285 if (!path->skip_locking) {
4286 ret = btrfs_try_tree_read_lock(next);
4288 btrfs_set_path_blocking(path);
4289 btrfs_tree_read_lock(next);
4290 btrfs_clear_path_blocking(path, next,
4293 next_rw_lock = BTRFS_READ_LOCK;
4297 path->slots[level] = slot;
4300 c = path->nodes[level];
4301 if (path->locks[level])
4302 btrfs_tree_unlock_rw(c, path->locks[level]);
4304 free_extent_buffer(c);
4305 path->nodes[level] = next;
4306 path->slots[level] = 0;
4307 if (!path->skip_locking)
4308 path->locks[level] = next_rw_lock;
4312 ret = read_block_for_search(NULL, root, path, &next, level,
4318 btrfs_release_path(path);
4322 if (!path->skip_locking) {
4323 ret = btrfs_try_tree_read_lock(next);
4325 btrfs_set_path_blocking(path);
4326 btrfs_tree_read_lock(next);
4327 btrfs_clear_path_blocking(path, next,
4330 next_rw_lock = BTRFS_READ_LOCK;
4335 unlock_up(path, 0, 1, 0, NULL);
4336 path->leave_spinning = old_spinning;
4338 btrfs_set_path_blocking(path);
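/*
 * Usage sketch (illustrative only): the common "walk forward from a
 * starting key" loop built on btrfs_search_slot() and btrfs_next_leaf().
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	while (1) {
 *		leaf = path->nodes[0];
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret < 0)
 *				goto out;
 *			if (ret > 0)
 *				break;	// no more leaves
 *			continue;
 *		}
 *		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 *		// ... examine the item at path->slots[0] here ...
 *		path->slots[0]++;
 *	}
 */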
4344 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4345 * searching until it gets past min_objectid or finds an item of 'type'
4347 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4349 int btrfs_previous_item(struct btrfs_root *root,
4350 struct btrfs_path *path, u64 min_objectid,
4353 struct btrfs_key found_key;
4354 struct extent_buffer *leaf;
4359 if (path->slots[0] == 0) {
4360 btrfs_set_path_blocking(path);
4361 ret = btrfs_prev_leaf(root, path);
4367 leaf = path->nodes[0];
4368 nritems = btrfs_header_nritems(leaf);
4371 if (path->slots[0] == nritems)
4374 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4375 if (found_key.objectid < min_objectid)
4377 if (found_key.type == type)
4379 if (found_key.objectid == min_objectid &&
4380 found_key.type < type)