/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int level, int slot,
		    int tree_mod_log);
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb);
struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
					  u32 blocksize, u64 parent_transid,
					  u64 time_seq);
struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
						u64 bytenr, u32 blocksize,
						u64 time_seq);
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif
	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
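/*
 * Typical lifecycle (a sketch, not code from this file; error handling
 * trimmed): paths are heap objects that carry the locked nodes of one
 * tree walk, so every btrfs_alloc_path() is paired with btrfs_free_path():
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
 *	... on ret == 0 the item is at path->nodes[0], path->slots[0] ...
 *	btrfs_free_path(path);
 */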
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was cow'ed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* cowonly roots (everything not a reference counted cow subvolume) just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0, 1);
	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 index;		/* shifted logical */
	struct seq_list elem;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

static inline void
__get_tree_mod_seq(struct btrfs_fs_info *fs_info, struct seq_list *elem)
{
	elem->seq = atomic_inc_return(&fs_info->tree_mod_seq);
	list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
}

void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	elem->flags = 1;
	spin_lock(&fs_info->tree_mod_seq_lock);
	__get_tree_mod_seq(fs_info, elem);
	spin_unlock(&fs_info->tree_mod_seq_lock);
}
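/*
 * Illustrative pairing (a sketch, not code from this file): a reader that
 * needs a stable view of past tree states brackets its work with the
 * get/put calls above and uses the sequence number it got as time_seq:
 *
 *	struct seq_list elem = {};
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	ret = btrfs_search_old_slot(root, &key, path, elem.seq);
 *	...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 *
 * while the element sits on tree_mod_seq_list, other callers of
 * btrfs_put_tree_mod_seq() will not throw away log entries that
 * elem.seq still blocks.
 */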
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	BUG_ON(!(elem->flags & 1));
	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if ((cur_elem->flags & 1) && cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				goto out;
			}
			min_seq = cur_elem->seq;
		}
	}

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->elem.seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		list_del(&tm->elem.list);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
out:
	spin_unlock(&fs_info->tree_mod_seq_lock);
}
/*
 * key order of the log:
 *       index -> sequence
 *
 * the index is the shifted logical of the *new* root node for root replace
 * operations, or the shifted logical of the affected block for all other
 * operations.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;
	int ret = 0;

	BUG_ON(!tm || !tm->elem.seq);

	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->index < tm->index)
			new = &((*new)->rb_left);
		else if (cur->index > tm->index)
			new = &((*new)->rb_right);
		else if (cur->elem.seq < tm->elem.seq)
			new = &((*new)->rb_left);
		else if (cur->elem.seq > tm->elem.seq)
			new = &((*new)->rb_right);
		else {
			kfree(tm);
			ret = -EEXIST;
			goto unlock;
		}
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
unlock:
	write_unlock(&fs_info->tree_mod_log_lock);
	return ret;
}
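/*
 * Example of the resulting ordering (illustrative, not from the original
 * file): with PAGE_CACHE_SHIFT == 12, a block at logical 0x42000 is keyed
 * under index 0x42; three modifications of that block with seq 5, 7 and 9
 * sit next to each other in the rb-tree, so a lookup for index 0x42 can
 * walk to either the oldest or the newest entry whose seq is still >= a
 * given minimum sequence number.
 */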
static inline int
tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
	       struct tree_mod_elem **tm_ret)
{
	struct tree_mod_elem *tm;
	int seq;

	if (list_empty(&fs_info->tree_mod_seq_list))
		return 0;

	tm = *tm_ret = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return -ENOMEM;

	__get_tree_mod_seq(fs_info, &tm->elem);
	seq = tm->elem.seq;
	tm->elem.flags = 0;

	return seq;
}
static noinline int
tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
			     struct extent_buffer *eb, int slot,
			     enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);

	return __tree_mod_log_insert(fs_info, tm);
}
static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
			int slot, enum mod_log_op op)
{
	return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
}
static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;
	int i;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
					      MOD_LOG_KEY_REMOVE_WHILE_MOVING);
		BUG_ON(ret < 0);
	}

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	return __tree_mod_log_insert(fs_info, tm);
}
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret <= 0)
		return ret;

	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	return __tree_mod_log_insert(fs_info, tm);
}
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;
	u64 index = start >> PAGE_CACHE_SHIFT;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->index < index) {
			node = node->rb_left;
		} else if (cur->index > index) {
			node = node->rb_right;
		} else if (cur->elem.seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->elem.seq > cur->elem.seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->elem.seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->elem.seq < cur->elem.seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}
/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}
/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}
static noinline void
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret;
	int i;

	if (list_empty(&fs_info->tree_mod_seq_list))
		return;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return;

	/* speed this up by single seq for all operations? */
	for (i = 0; i < nr_items; i++) {
		ret = tree_mod_log_insert_key(fs_info, src, i + src_offset,
					      MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
		ret = tree_mod_log_insert_key(fs_info, dst, i + dst_offset,
					      MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
	}
}
static noinline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}
static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb,
			  struct btrfs_disk_key *disk_key, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
					   MOD_LOG_KEY_REPLACE,
					   atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb)
{
	int i;
	int ret;
	u32 nritems;

	if (list_empty(&fs_info->tree_mod_seq_list))
		return;

	if (btrfs_header_level(eb) == 0)
		return;

	nritems = btrfs_header_nritems(eb);
	for (i = nritems - 1; i >= 0; i--) {
		ret = tree_mod_log_insert_key(fs_info, eb, i,
					      MOD_LOG_KEY_REMOVE_WHILE_FREEING);
		BUG_ON(ret < 0);
	}
}
static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node)
{
	int ret;
	tree_mod_log_free_eb(root->fs_info, root->node);
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS);
	BUG_ON(ret < 0);
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       buf->len, &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		/*
		 * don't log freeing in case we're freeing the root node, this
		 * is done by tree_mod_log_set_root_pointer later
		 */
		if (buf != root->node && btrfs_header_level(buf) != 0)
			tree_mod_log_free_eb(root->fs_info, buf);
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = root->node->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the replacement
	 * operation (if it is replaced at all). this has the index of the *new*
	 * root, making it the very first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * we must have key remove operations in the log before the
		 * replace operation.
		 */
		BUG_ON(looped && !tm);

		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		BUG_ON(root_logical == root->node->start);
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}
/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
		      struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	while (tm && tm->elem.seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			if (tm->slot != n - 1) {
				o_dst = btrfs_node_key_ptr_offset(tm->slot);
				o_src = btrfs_node_key_ptr_offset(tm->slot + 1);
				memmove_extent_buffer(eb, o_dst, o_src, p_size);
			}
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no root. we simply ignore that operation.
			 */
			break;
		default:
			BUG();
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		if (tm->index != first_tm->index)
			break;
	}
	btrfs_set_header_nritems(eb, n);
}

static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		    u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(eb->start,
						fs_info->tree_root->nodesize);
		BUG_ON(!eb_rewin);
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		BUG_ON(!eb_rewin);
	}

	extent_buffer_get(eb_rewin);
	free_extent_buffer(eb);

	__tree_mod_log_rewind(eb_rewin, time_seq, tm);

	return eb_rewin;
}
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct extent_buffer *eb;
	struct tree_mod_root *old_root;
	u64 old_generation;

	tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
	if (!tm)
		return root->node;

	old_root = &tm->old_root;
	old_generation = tm->generation;

	tm = tree_mod_log_search(root->fs_info, old_root->logical, time_seq);
	/*
	 * there was an item in the log when __tree_mod_log_oldest_root
	 * returned. this one must not go away, because the time_seq passed to
	 * us must be blocking its removal.
	 */
	BUG_ON(!tm);

	if (old_root->logical == root->node->start) {
		/* there are logged operations for the current root */
		eb = btrfs_clone_extent_buffer(root->node);
	} else {
		/* there's a root replace operation for the current root */
		eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT,
					       root->nodesize);
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, root->root_key.objectid);
	}
	if (!eb)
		return NULL;
	btrfs_set_header_level(eb, old_root->level);
	btrfs_set_header_generation(eb, old_generation);
	__tree_mod_log_rewind(eb, time_seq, tm);

	return eb;
}
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !root->force_cow)
		return 0;
	return 1;
}
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
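/*
 * Worked example (illustrative, not from the original source): with a 4K
 * blocksize, blocknr 0 and other 16384 gives 16384 - (0 + 4096) = 12288,
 * which is < 32768, so the two blocks count as close; a block starting
 * 32K or more past the end of the other one is "far" and becomes a
 * candidate for relocation by the defrag code below.
 */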
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
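/*
 * Illustration (not from the original file): keys compare by objectid
 * first, then type, then offset, so
 *
 *	(256, BTRFS_EXTENT_DATA_KEY, 0) < (256, BTRFS_EXTENT_DATA_KEY, 4096)
 *
 * and both sort before any key with objectid 257, whatever its type and
 * offset are.
 */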
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN_ON(1);
	if (trans->transid != root->fs_info->generation)
		WARN_ON(1);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (cache_only) {
				free_extent_buffer(cur);
				continue;
			}
			if (!cur) {
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
				if (!cur)
					return -EIO;
			} else if (!uptodate) {
				btrfs_read_buffer(cur, gen);
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
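/*
 * Sketch of the leaf layout this relies on (illustrative): item headers
 * grow from the front of the leaf, item data grows from the back, and
 * the data offset of the last (highest-numbered) item bounds the free
 * space in the middle:
 *
 *	[leaf header][item 0][item 1]...[free space]...[data 1][data 0]
 */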
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);

	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
		       btrfs_level_size(root, level - 1),
		       btrfs_node_ptr_generation(parent, slot));
}
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		if (!child) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}

		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		tree_mod_log_set_root_pointer(root, child);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	btrfs_header_nritems(mid);

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
		btrfs_header_nritems(mid);
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			del_ptr(trans, root, path, level + 1, pslot + 1, 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &right_key, pslot + 1, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		del_ptr(trans, root, path, level + 1, pslot, 1);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		tree_mod_log_set_node_key(root->fs_info, parent, &mid_key,
					  pslot, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &disk_key, pslot, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  &disk_key, pslot + 1, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_root *root,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 gen;
	int direction = path->reada;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = btrfs_level_size(root, level - 1);
	eb = btrfs_find_tree_block(root, search, blocksize);
	if (eb) {
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (direction < 0) {
			if (nr == 0)
				break;
			nr--;
		} else if (direction > 0) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada < 0 && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			gen = btrfs_node_ptr_generation(node, nr);
			readahead_tree_block(root, search, blocksize, gen);
			nread += blocksize;
		}
		nscan++;
		if ((nread > 65536 || nscan > 32))
			break;
	}
}

/*
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
 * cache
 */
static noinline int reada_for_balance(struct btrfs_root *root,
				      struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;
	int ret = 0;
	int blocksize;

	parent = path->nodes[level + 1];
	if (!parent)
		return 0;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];
	blocksize = btrfs_level_size(root, level);

	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = btrfs_find_tree_block(root, block1, blocksize);
		/*
		 * if we get -eagain from btrfs_buffer_uptodate, we
		 * don't want to return eagain here.  That will loop
		 * forever
		 */
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block1 = 0;
		free_extent_buffer(eb);
	}
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = btrfs_find_tree_block(root, block2, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block2 = 0;
		free_extent_buffer(eb);
	}
	if (block1 || block2) {
		ret = -EAGAIN;

		/* release the whole path */
		btrfs_release_path(path);

		/* read the blocks */
		if (block1)
			readahead_tree_block(root, block1, blocksize, 0);
		if (block2)
			readahead_tree_block(root, block2, blocksize, 0);

		if (block1) {
			eb = read_tree_block(root, block1, blocksize, 0);
			free_extent_buffer(eb);
		}
		if (block2) {
			eb = read_tree_block(root, block2, blocksize, 0);
			free_extent_buffer(eb);
		}
	}
	return ret;
}
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock_rw(t, path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}
/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}
/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 */
static int
read_block_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer **eb_ret, int level, int slot,
		       struct btrfs_key *key, u64 time_seq)
{
	u64 blocknr;
	u64 gen;
	u32 blocksize;
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;
	int ret;

	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);
	blocksize = btrfs_level_size(root, level - 1);

	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
	if (tmp) {
		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
			if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
				/*
				 * we found an up to date block without
				 * sleeping, return
				 * right away
				 */
				*eb_ret = tmp;
				return 0;
			}
			/* the pages were up to date, but we failed
			 * the generation number check.  Do a full
			 * read for the generation number that is correct.
			 * We must do this without dropping locks so
			 * we can trust our generation number
			 */
			free_extent_buffer(tmp);
			btrfs_set_path_blocking(p);

			/* now we're allowed to do a blocking uptodate check */
			tmp = read_tree_block(root, blocknr, blocksize, gen);
			if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
				*eb_ret = tmp;
				return 0;
			}
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EIO;
		}
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read.  Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);

	free_extent_buffer(tmp);
	if (p->reada)
		reada_for_search(root, p, level, slot, key->objectid);

	btrfs_release_path(p);

	ret = -EAGAIN;
	tmp = read_tree_block(root, blocknr, blocksize, 0);
	if (tmp) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date.  Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our EAGAINs.
		 */
		if (!btrfs_buffer_uptodate(tmp, 0, 0))
			ret = -EIO;
		free_extent_buffer(tmp);
	}
	return ret;
}
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	int ret;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = split_node(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		BUG_ON(sret > 0);
		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = balance_level(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			goto again;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return 0;

again:
	ret = -EAGAIN;
done:
	return ret;
}
2291 * look for key in the tree. path is filled in with nodes along the way
2292 * if key is found, we return zero and you can find the item in the leaf
2293 * level of the path (level 0)
2295 * If the key isn't found, the path points to the slot where it should
2296 * be inserted, and 1 is returned. If there are other errors during the
2297 * search a negative error number is returned.
2299 * if ins_len > 0, nodes and leaves will be split as we walk down the
2300 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
2303 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2304 *root, struct btrfs_key *key, struct btrfs_path *p, int
2307 struct extent_buffer *b;
2312 int lowest_unlock = 1;
2314 /* everything at write_lock_level or lower must be write locked */
2315 int write_lock_level = 0;
2316 u8 lowest_level = 0;
2317 int min_write_lock_level;
2319 lowest_level = p->lowest_level;
2320 WARN_ON(lowest_level && ins_len > 0);
2321 WARN_ON(p->nodes[0] != NULL);
2326 /* when we are removing items, we might have to go up to level
2327 * two as we update tree pointers Make sure we keep write
2328 * for those levels as well
2330 write_lock_level = 2;
2331 } else if (ins_len > 0) {
2333 * for inserting items, make sure we have a write lock on
2334 * level 1 so we can update keys
2336 write_lock_level = 1;
2340 write_lock_level = -1;
2342 if (cow && (p->keep_locks || p->lowest_level))
2343 write_lock_level = BTRFS_MAX_LEVEL;
2345 min_write_lock_level = write_lock_level;
2349 * we try very hard to do read locks on the root
2351 root_lock = BTRFS_READ_LOCK;
2353 if (p->search_commit_root) {
2355 * the commit roots are read only
2356 * so we always do read locks
2358 b = root->commit_root;
2359 extent_buffer_get(b);
2360 level = btrfs_header_level(b);
2361 if (!p->skip_locking)
2362 btrfs_tree_read_lock(b);
2364 if (p->skip_locking) {
2365 b = btrfs_root_node(root);
2366 level = btrfs_header_level(b);
2368 /* we don't know the level of the root node
2369 * until we actually have it read locked
2371 b = btrfs_read_lock_root_node(root);
2372 level = btrfs_header_level(b);
2373 if (level <= write_lock_level) {
2374 /* whoops, must trade for write lock */
2375 btrfs_tree_read_unlock(b);
2376 free_extent_buffer(b);
2377 b = btrfs_lock_root_node(root);
2378 root_lock = BTRFS_WRITE_LOCK;
2380 /* the level might have changed, check again */
2381 level = btrfs_header_level(b);
2385 p->nodes[level] = b;
2386 if (!p->skip_locking)
2387 p->locks[level] = root_lock;
2390 level = btrfs_header_level(b);
2393 * setup the path here so we can release it under lock
2394 * contention with the cow code
2398 * if we don't really need to cow this block
2399 * then we don't want to set the path blocking,
2400 * so we test it here
2402 if (!should_cow_block(trans, root, b))
2405 btrfs_set_path_blocking(p);
2408 * must have write locks on this node and the
2411 if (level + 1 > write_lock_level) {
2412 write_lock_level = level + 1;
2413 btrfs_release_path(p);
2417 err = btrfs_cow_block(trans, root, b,
2418 p->nodes[level + 1],
2419 p->slots[level + 1], &b);
2426 BUG_ON(!cow && ins_len);
2428 p->nodes[level] = b;
2429 btrfs_clear_path_blocking(p, NULL, 0);
2432 * we have a lock on b and as long as we aren't changing
2433 * the tree, there is no way to for the items in b to change.
2434 * It is safe to drop the lock on our parent before we
2435 * go through the expensive btree search on b.
2437 * If cow is true, then we might be changing slot zero,
2438 * which may require changing the parent. So, we can't
2439 * drop the lock until after we know which slot we're
2443 btrfs_unlock_up_safe(p, level + 1);
2445 ret = bin_search(b, key, level, &slot);
2449 if (ret && slot > 0) {
2453 p->slots[level] = slot;
2454 err = setup_nodes_for_search(trans, root, p, b, level,
2455 ins_len, &write_lock_level);
2462 b = p->nodes[level];
2463 slot = p->slots[level];
2466 * slot 0 is special, if we change the key
2467 * we have to update the parent pointer
2468 * which means we must have a write lock
2471 if (slot == 0 && cow &&
2472 write_lock_level < level + 1) {
2473 write_lock_level = level + 1;
2474 btrfs_release_path(p);
2478 unlock_up(p, level, lowest_unlock,
2479 min_write_lock_level, &write_lock_level);
2481 if (level == lowest_level) {
2487 err = read_block_for_search(trans, root, p,
2488 &b, level, slot, key, 0);
2496 if (!p->skip_locking) {
2497 level = btrfs_header_level(b);
2498 if (level <= write_lock_level) {
2499 err = btrfs_try_tree_write_lock(b);
2501 btrfs_set_path_blocking(p);
2503 btrfs_clear_path_blocking(p, b,
2506 p->locks[level] = BTRFS_WRITE_LOCK;
2508 err = btrfs_try_tree_read_lock(b);
2510 btrfs_set_path_blocking(p);
2511 btrfs_tree_read_lock(b);
2512 btrfs_clear_path_blocking(p, b,
2515 p->locks[level] = BTRFS_READ_LOCK;
2517 p->nodes[level] = b;
2520 p->slots[level] = slot;
2522 btrfs_leaf_free_space(root, b) < ins_len) {
2523 if (write_lock_level < 1) {
2524 write_lock_level = 1;
2525 btrfs_release_path(p);
2529 btrfs_set_path_blocking(p);
2530 err = split_leaf(trans, root, key,
2531 p, ins_len, ret == 0);
2532 btrfs_clear_path_blocking(p, NULL, 0);
2540 if (!p->search_for_split)
2541 unlock_up(p, level, lowest_unlock,
2542 min_write_lock_level, &write_lock_level);
2549 * we don't really know what they plan on doing with the path
2550 * from here on, so for now just mark it as blocking
2552 if (!p->leave_spinning)
2553 btrfs_set_path_blocking(p);
2555 btrfs_release_path(p);
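/*
 * For illustration, a minimal read-only lookup built on
 * btrfs_search_slot. This sketch is not part of the original file;
 * the key values and the helper name are made up.
 */
static int __maybe_unused example_search_slot(struct btrfs_root *root,
					      u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	/* trans NULL, ins_len 0, cow 0: no splitting, merging or cow */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret > 0) {
		/* not found; the slot is where the key would be inserted */
		ret = -ENOENT;
	}
	/* on 0, path->nodes[0] and path->slots[0] point at the item */

	btrfs_free_path(path);
	return ret;
}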
2560 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2561 * current state of the tree together with the operations recorded in the tree
2562 * modification log to search for the key in a previous version of this tree, as
2563 * denoted by the time_seq parameter.
2565 * Naturally, there is no support for insert, delete or cow operations.
2567 * The resulting path and return value will be set up as if we called
2568 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2570 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2571 struct btrfs_path *p, u64 time_seq)
2573 struct extent_buffer *b;
2578 int lowest_unlock = 1;
2579 u8 lowest_level = 0;
2581 lowest_level = p->lowest_level;
2582 WARN_ON(p->nodes[0] != NULL);
2584 if (p->search_commit_root) {
2586 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2591 b = get_old_root(root, time_seq);
2592 extent_buffer_get(b);
2593 level = btrfs_header_level(b);
2594 btrfs_tree_read_lock(b);
2595 p->locks[level] = BTRFS_READ_LOCK;
2598 level = btrfs_header_level(b);
2599 p->nodes[level] = b;
2600 btrfs_clear_path_blocking(p, NULL, 0);
2603 * we have a lock on b and as long as we aren't changing
2604 * the tree, there is no way for the items in b to change.
2605 * It is safe to drop the lock on our parent before we
2606 * go through the expensive btree search on b.
2608 btrfs_unlock_up_safe(p, level + 1);
2610 ret = bin_search(b, key, level, &slot);
2614 if (ret && slot > 0) {
2618 p->slots[level] = slot;
2619 unlock_up(p, level, lowest_unlock, 0, NULL);
2621 if (level == lowest_level) {
2627 err = read_block_for_search(NULL, root, p, &b, level,
2628 slot, key, time_seq);
2636 level = btrfs_header_level(b);
2637 err = btrfs_try_tree_read_lock(b);
2639 btrfs_set_path_blocking(p);
2640 btrfs_tree_read_lock(b);
2641 btrfs_clear_path_blocking(p, b,
2644 p->locks[level] = BTRFS_READ_LOCK;
2645 p->nodes[level] = b;
2646 b = tree_mod_log_rewind(root->fs_info, b, time_seq);
2647 if (b != p->nodes[level]) {
2648 btrfs_tree_unlock_rw(p->nodes[level],
2650 p->locks[level] = 0;
2651 p->nodes[level] = b;
2654 p->slots[level] = slot;
2655 unlock_up(p, level, lowest_unlock, 0, NULL);
2661 if (!p->leave_spinning)
2662 btrfs_set_path_blocking(p);
2664 btrfs_release_path(p);
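/*
 * For illustration, the same kind of lookup against a past version of
 * the tree. Not part of the original file; time_seq would normally
 * come from btrfs_get_tree_mod_seq(), here it is a made-up argument.
 */
static int __maybe_unused example_search_old(struct btrfs_root *root,
					     struct btrfs_key *key,
					     u64 time_seq)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* read only by definition: there are no ins_len/cow parameters */
	ret = btrfs_search_old_slot(root, key, path, time_seq);

	btrfs_free_path(path);
	return ret;
}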
2670 * adjust the pointers going up the tree, starting at level
2671 * making sure the right key of each node points to 'key'.
2672 * This is used after shifting pointers to the left, so it stops
2673 * fixing up pointers when a given leaf/node is not in slot 0 of the
2677 static void fixup_low_keys(struct btrfs_trans_handle *trans,
2678 struct btrfs_root *root, struct btrfs_path *path,
2679 struct btrfs_disk_key *key, int level)
2682 struct extent_buffer *t;
2684 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2685 int tslot = path->slots[i];
2686 if (!path->nodes[i])
2689 tree_mod_log_set_node_key(root->fs_info, t, key, tslot, 1);
2690 btrfs_set_node_key(t, key, tslot);
2691 btrfs_mark_buffer_dirty(path->nodes[i]);
2700 * This function isn't completely safe. It's the caller's responsibility
2701 * to ensure that the new key won't break the order
2703 void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
2704 struct btrfs_root *root, struct btrfs_path *path,
2705 struct btrfs_key *new_key)
2707 struct btrfs_disk_key disk_key;
2708 struct extent_buffer *eb;
2711 eb = path->nodes[0];
2712 slot = path->slots[0];
2714 btrfs_item_key(eb, &disk_key, slot - 1);
2715 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
2717 if (slot < btrfs_header_nritems(eb) - 1) {
2718 btrfs_item_key(eb, &disk_key, slot + 1);
2719 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
2722 btrfs_cpu_key_to_disk(&disk_key, new_key);
2723 btrfs_set_item_key(eb, &disk_key, slot);
2724 btrfs_mark_buffer_dirty(eb);
2726 fixup_low_keys(trans, root, path, &disk_key, 1);
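/*
 * For illustration, a hypothetical helper (not in the original file)
 * that bumps the offset of the item the path points at. The new key
 * must still sort between the two neighbouring items; that is exactly
 * the invariant btrfs_set_item_key_safe BUGs on.
 */
static void __maybe_unused example_bump_key_offset(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 new_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;	/* caller guarantees the ordering */
	btrfs_set_item_key_safe(trans, root, path, &new_key);
}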
2730 * try to push data from one node into the next node left in the
2733 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2734 * error, and > 0 if there was no room in the left hand block.
2736 static int push_node_left(struct btrfs_trans_handle *trans,
2737 struct btrfs_root *root, struct extent_buffer *dst,
2738 struct extent_buffer *src, int empty)
2745 src_nritems = btrfs_header_nritems(src);
2746 dst_nritems = btrfs_header_nritems(dst);
2747 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2748 WARN_ON(btrfs_header_generation(src) != trans->transid);
2749 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2751 if (!empty && src_nritems <= 8)
2754 if (push_items <= 0)
2758 push_items = min(src_nritems, push_items);
2759 if (push_items < src_nritems) {
2760 /* leave at least 8 pointers in the node if
2761 * we aren't going to empty it
2763 if (src_nritems - push_items < 8) {
2764 if (push_items <= 8)
2770 push_items = min(src_nritems - 8, push_items);
2772 tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
2774 copy_extent_buffer(dst, src,
2775 btrfs_node_key_ptr_offset(dst_nritems),
2776 btrfs_node_key_ptr_offset(0),
2777 push_items * sizeof(struct btrfs_key_ptr));
2779 if (push_items < src_nritems) {
2780 tree_mod_log_eb_move(root->fs_info, src, 0, push_items,
2781 src_nritems - push_items);
2782 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2783 btrfs_node_key_ptr_offset(push_items),
2784 (src_nritems - push_items) *
2785 sizeof(struct btrfs_key_ptr));
2787 btrfs_set_header_nritems(src, src_nritems - push_items);
2788 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2789 btrfs_mark_buffer_dirty(src);
2790 btrfs_mark_buffer_dirty(dst);
2796 * try to push data from one node into the next node right in the
2799 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2800 * error, and > 0 if there was no room in the right hand block.
2802 * this will only push up to 1/2 the contents of the left node over
2804 static int balance_node_right(struct btrfs_trans_handle *trans,
2805 struct btrfs_root *root,
2806 struct extent_buffer *dst,
2807 struct extent_buffer *src)
2815 WARN_ON(btrfs_header_generation(src) != trans->transid);
2816 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2818 src_nritems = btrfs_header_nritems(src);
2819 dst_nritems = btrfs_header_nritems(dst);
2820 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2821 if (push_items <= 0)
2824 if (src_nritems < 4)
2827 max_push = src_nritems / 2 + 1;
2828 /* don't try to empty the node */
2829 if (max_push >= src_nritems)
2832 if (max_push < push_items)
2833 push_items = max_push;
2835 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
2836 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2837 btrfs_node_key_ptr_offset(0),
2839 sizeof(struct btrfs_key_ptr));
2841 tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
2842 src_nritems - push_items, push_items);
2843 copy_extent_buffer(dst, src,
2844 btrfs_node_key_ptr_offset(0),
2845 btrfs_node_key_ptr_offset(src_nritems - push_items),
2846 push_items * sizeof(struct btrfs_key_ptr));
2848 btrfs_set_header_nritems(src, src_nritems - push_items);
2849 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2851 btrfs_mark_buffer_dirty(src);
2852 btrfs_mark_buffer_dirty(dst);
2858 * helper function to insert a new root level in the tree.
2859 * A new node is allocated, and a single item is inserted to
2860 * point to the existing root
2862 * returns zero on success or < 0 on failure.
2864 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2865 struct btrfs_root *root,
2866 struct btrfs_path *path, int level)
2869 struct extent_buffer *lower;
2870 struct extent_buffer *c;
2871 struct extent_buffer *old;
2872 struct btrfs_disk_key lower_key;
2874 BUG_ON(path->nodes[level]);
2875 BUG_ON(path->nodes[level-1] != root->node);
2877 lower = path->nodes[level-1];
2879 btrfs_item_key(lower, &lower_key, 0);
2881 btrfs_node_key(lower, &lower_key, 0);
2883 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
2884 root->root_key.objectid, &lower_key,
2885 level, root->node->start, 0);
2889 root_add_used(root, root->nodesize);
2891 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
2892 btrfs_set_header_nritems(c, 1);
2893 btrfs_set_header_level(c, level);
2894 btrfs_set_header_bytenr(c, c->start);
2895 btrfs_set_header_generation(c, trans->transid);
2896 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
2897 btrfs_set_header_owner(c, root->root_key.objectid);
2899 write_extent_buffer(c, root->fs_info->fsid,
2900 (unsigned long)btrfs_header_fsid(c),
2903 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
2904 (unsigned long)btrfs_header_chunk_tree_uuid(c),
2907 btrfs_set_node_key(c, &lower_key, 0);
2908 btrfs_set_node_blockptr(c, 0, lower->start);
2909 lower_gen = btrfs_header_generation(lower);
2910 WARN_ON(lower_gen != trans->transid);
2912 btrfs_set_node_ptr_generation(c, 0, lower_gen);
2914 btrfs_mark_buffer_dirty(c);
2917 tree_mod_log_set_root_pointer(root, c);
2918 rcu_assign_pointer(root->node, c);
2920 /* the super has an extra ref to root->node */
2921 free_extent_buffer(old);
2923 add_root_to_dirty_list(root);
2924 extent_buffer_get(c);
2925 path->nodes[level] = c;
2926 path->locks[level] = BTRFS_WRITE_LOCK;
2927 path->slots[level] = 0;
2932 * worker function to insert a single pointer in a node.
2933 * the node should have enough room for the pointer already
2935 * slot and level indicate where you want the key to go, and
2936 * blocknr is the block the key points to.
2938 static void insert_ptr(struct btrfs_trans_handle *trans,
2939 struct btrfs_root *root, struct btrfs_path *path,
2940 struct btrfs_disk_key *key, u64 bytenr,
2941 int slot, int level, int tree_mod_log)
2943 struct extent_buffer *lower;
2947 BUG_ON(!path->nodes[level]);
2948 btrfs_assert_tree_locked(path->nodes[level]);
2949 lower = path->nodes[level];
2950 nritems = btrfs_header_nritems(lower);
2951 BUG_ON(slot > nritems);
2952 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
2953 if (slot != nritems) {
2954 if (tree_mod_log && level)
2955 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
2956 slot, nritems - slot);
2957 memmove_extent_buffer(lower,
2958 btrfs_node_key_ptr_offset(slot + 1),
2959 btrfs_node_key_ptr_offset(slot),
2960 (nritems - slot) * sizeof(struct btrfs_key_ptr));
2962 if (tree_mod_log && level) {
2963 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
2967 btrfs_set_node_key(lower, key, slot);
2968 btrfs_set_node_blockptr(lower, slot, bytenr);
2969 WARN_ON(trans->transid == 0);
2970 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2971 btrfs_set_header_nritems(lower, nritems + 1);
2972 btrfs_mark_buffer_dirty(lower);
2976 * split the node at the specified level in path in two.
2977 * The path is corrected to point to the appropriate node after the split
2979 * Before splitting this tries to make some room in the node by pushing
2980 * left and right, if either one works, it returns right away.
2982 * returns 0 on success and < 0 on failure
2984 static noinline int split_node(struct btrfs_trans_handle *trans,
2985 struct btrfs_root *root,
2986 struct btrfs_path *path, int level)
2988 struct extent_buffer *c;
2989 struct extent_buffer *split;
2990 struct btrfs_disk_key disk_key;
2995 c = path->nodes[level];
2996 WARN_ON(btrfs_header_generation(c) != trans->transid);
2997 if (c == root->node) {
2998 /* trying to split the root, let's make a new one */
2999 ret = insert_new_root(trans, root, path, level + 1);
3003 ret = push_nodes_for_insert(trans, root, path, level);
3004 c = path->nodes[level];
3005 if (!ret && btrfs_header_nritems(c) <
3006 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3012 c_nritems = btrfs_header_nritems(c);
3013 mid = (c_nritems + 1) / 2;
3014 btrfs_node_key(c, &disk_key, mid);
3016 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3017 root->root_key.objectid,
3018 &disk_key, level, c->start, 0);
3020 return PTR_ERR(split);
3022 root_add_used(root, root->nodesize);
3024 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3025 btrfs_set_header_level(split, btrfs_header_level(c));
3026 btrfs_set_header_bytenr(split, split->start);
3027 btrfs_set_header_generation(split, trans->transid);
3028 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3029 btrfs_set_header_owner(split, root->root_key.objectid);
3030 write_extent_buffer(split, root->fs_info->fsid,
3031 (unsigned long)btrfs_header_fsid(split),
3033 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3034 (unsigned long)btrfs_header_chunk_tree_uuid(split),
3037 tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
3038 copy_extent_buffer(split, c,
3039 btrfs_node_key_ptr_offset(0),
3040 btrfs_node_key_ptr_offset(mid),
3041 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3042 btrfs_set_header_nritems(split, c_nritems - mid);
3043 btrfs_set_header_nritems(c, mid);
3046 btrfs_mark_buffer_dirty(c);
3047 btrfs_mark_buffer_dirty(split);
3049 insert_ptr(trans, root, path, &disk_key, split->start,
3050 path->slots[level + 1] + 1, level + 1, 1);
3052 if (path->slots[level] >= mid) {
3053 path->slots[level] -= mid;
3054 btrfs_tree_unlock(c);
3055 free_extent_buffer(c);
3056 path->nodes[level] = split;
3057 path->slots[level + 1] += 1;
3059 btrfs_tree_unlock(split);
3060 free_extent_buffer(split);
3066 * how many bytes are required to store the items in a leaf. start
3067 * and nr indicate which items in the leaf to check. This totals up the
3068 * space used both by the item structs and the item data
3070 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3073 int nritems = btrfs_header_nritems(l);
3074 int end = min(nritems, start + nr) - 1;
3078 data_len = btrfs_item_end_nr(l, start);
3079 data_len = data_len - btrfs_item_offset_nr(l, end);
3080 data_len += sizeof(struct btrfs_item) * nr;
3081 WARN_ON(data_len < 0);
3086 * The space between the end of the leaf items and
3087 * the start of the leaf data. IOW, how much room
3088 * the leaf has left for both items and data
3090 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3091 struct extent_buffer *leaf)
3093 int nritems = btrfs_header_nritems(leaf);
3095 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3097 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
3098 "used %d nritems %d\n",
3099 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3100 leaf_space_used(leaf, 0, nritems), nritems);
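/*
 * Worked example, assuming 4k leaves: the header eats
 * sizeof(struct btrfs_header) == 101 bytes, so BTRFS_LEAF_DATA_SIZE()
 * is 4096 - 101 == 3995 bytes. A leaf holding 10 items of 100 data
 * bytes each uses 10 * sizeof(struct btrfs_item) + 10 * 100
 * == 10 * 25 + 1000 == 1250 bytes, so btrfs_leaf_free_space() returns
 * 3995 - 1250 == 2745 bytes for new items and their data.
 */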
3106 * min slot controls the lowest index we're willing to push to the
3107 * right. We'll push up to and including min_slot, but no lower
3109 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3110 struct btrfs_root *root,
3111 struct btrfs_path *path,
3112 int data_size, int empty,
3113 struct extent_buffer *right,
3114 int free_space, u32 left_nritems,
3117 struct extent_buffer *left = path->nodes[0];
3118 struct extent_buffer *upper = path->nodes[1];
3119 struct btrfs_map_token token;
3120 struct btrfs_disk_key disk_key;
3125 struct btrfs_item *item;
3131 btrfs_init_map_token(&token);
3136 nr = max_t(u32, 1, min_slot);
3138 if (path->slots[0] >= left_nritems)
3139 push_space += data_size;
3141 slot = path->slots[1];
3142 i = left_nritems - 1;
3144 item = btrfs_item_nr(left, i);
3146 if (!empty && push_items > 0) {
3147 if (path->slots[0] > i)
3149 if (path->slots[0] == i) {
3150 int space = btrfs_leaf_free_space(root, left);
3151 if (space + push_space * 2 > free_space)
3156 if (path->slots[0] == i)
3157 push_space += data_size;
3159 this_item_size = btrfs_item_size(left, item);
3160 if (this_item_size + sizeof(*item) + push_space > free_space)
3164 push_space += this_item_size + sizeof(*item);
3170 if (push_items == 0)
3173 if (!empty && push_items == left_nritems)
3176 /* push left to right */
3177 right_nritems = btrfs_header_nritems(right);
3179 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3180 push_space -= leaf_data_end(root, left);
3182 /* make room in the right data area */
3183 data_end = leaf_data_end(root, right);
3184 memmove_extent_buffer(right,
3185 btrfs_leaf_data(right) + data_end - push_space,
3186 btrfs_leaf_data(right) + data_end,
3187 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3189 /* copy from the left data area */
3190 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3191 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3192 btrfs_leaf_data(left) + leaf_data_end(root, left),
3195 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3196 btrfs_item_nr_offset(0),
3197 right_nritems * sizeof(struct btrfs_item));
3199 /* copy the items from left to right */
3200 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3201 btrfs_item_nr_offset(left_nritems - push_items),
3202 push_items * sizeof(struct btrfs_item));
3204 /* update the item pointers */
3205 right_nritems += push_items;
3206 btrfs_set_header_nritems(right, right_nritems);
3207 push_space = BTRFS_LEAF_DATA_SIZE(root);
3208 for (i = 0; i < right_nritems; i++) {
3209 item = btrfs_item_nr(right, i);
3210 push_space -= btrfs_token_item_size(right, item, &token);
3211 btrfs_set_token_item_offset(right, item, push_space, &token);
3214 left_nritems -= push_items;
3215 btrfs_set_header_nritems(left, left_nritems);
3218 btrfs_mark_buffer_dirty(left);
3220 clean_tree_block(trans, root, left);
3222 btrfs_mark_buffer_dirty(right);
3224 btrfs_item_key(right, &disk_key, 0);
3225 btrfs_set_node_key(upper, &disk_key, slot + 1);
3226 btrfs_mark_buffer_dirty(upper);
3228 /* then fix up the leaf pointer in the path */
3229 if (path->slots[0] >= left_nritems) {
3230 path->slots[0] -= left_nritems;
3231 if (btrfs_header_nritems(path->nodes[0]) == 0)
3232 clean_tree_block(trans, root, path->nodes[0]);
3233 btrfs_tree_unlock(path->nodes[0]);
3234 free_extent_buffer(path->nodes[0]);
3235 path->nodes[0] = right;
3236 path->slots[1] += 1;
3238 btrfs_tree_unlock(right);
3239 free_extent_buffer(right);
3244 btrfs_tree_unlock(right);
3245 free_extent_buffer(right);
3250 * push some data in the path leaf to the right, trying to free up at
3251 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3253 * returns 1 if the push failed because the other node didn't have enough
3254 * room, 0 if everything worked out and < 0 if there were major errors.
3256 * this will push starting from min_slot to the end of the leaf. It won't
3257 * push any slot lower than min_slot
3259 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3260 *root, struct btrfs_path *path,
3261 int min_data_size, int data_size,
3262 int empty, u32 min_slot)
3264 struct extent_buffer *left = path->nodes[0];
3265 struct extent_buffer *right;
3266 struct extent_buffer *upper;
3272 if (!path->nodes[1])
3275 slot = path->slots[1];
3276 upper = path->nodes[1];
3277 if (slot >= btrfs_header_nritems(upper) - 1)
3280 btrfs_assert_tree_locked(path->nodes[1]);
3282 right = read_node_slot(root, upper, slot + 1);
3286 btrfs_tree_lock(right);
3287 btrfs_set_lock_blocking(right);
3289 free_space = btrfs_leaf_free_space(root, right);
3290 if (free_space < data_size)
3293 /* cow and double check */
3294 ret = btrfs_cow_block(trans, root, right, upper,
3299 free_space = btrfs_leaf_free_space(root, right);
3300 if (free_space < data_size)
3303 left_nritems = btrfs_header_nritems(left);
3304 if (left_nritems == 0)
3307 return __push_leaf_right(trans, root, path, min_data_size, empty,
3308 right, free_space, left_nritems, min_slot);
3310 btrfs_tree_unlock(right);
3311 free_extent_buffer(right);
3316 * push some data in the path leaf to the left, trying to free up at
3317 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3319 * max_slot can put a limit on how far into the leaf we'll push items. The
3320 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3323 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3324 struct btrfs_root *root,
3325 struct btrfs_path *path, int data_size,
3326 int empty, struct extent_buffer *left,
3327 int free_space, u32 right_nritems,
3330 struct btrfs_disk_key disk_key;
3331 struct extent_buffer *right = path->nodes[0];
3335 struct btrfs_item *item;
3336 u32 old_left_nritems;
3340 u32 old_left_item_size;
3341 struct btrfs_map_token token;
3343 btrfs_init_map_token(&token);
3346 nr = min(right_nritems, max_slot);
3348 nr = min(right_nritems - 1, max_slot);
3350 for (i = 0; i < nr; i++) {
3351 item = btrfs_item_nr(right, i);
3353 if (!empty && push_items > 0) {
3354 if (path->slots[0] < i)
3356 if (path->slots[0] == i) {
3357 int space = btrfs_leaf_free_space(root, right);
3358 if (space + push_space * 2 > free_space)
3363 if (path->slots[0] == i)
3364 push_space += data_size;
3366 this_item_size = btrfs_item_size(right, item);
3367 if (this_item_size + sizeof(*item) + push_space > free_space)
3371 push_space += this_item_size + sizeof(*item);
3374 if (push_items == 0) {
3378 if (!empty && push_items == btrfs_header_nritems(right))
3381 /* push data from right to left */
3382 copy_extent_buffer(left, right,
3383 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3384 btrfs_item_nr_offset(0),
3385 push_items * sizeof(struct btrfs_item));
3387 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3388 btrfs_item_offset_nr(right, push_items - 1);
3390 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3391 leaf_data_end(root, left) - push_space,
3392 btrfs_leaf_data(right) +
3393 btrfs_item_offset_nr(right, push_items - 1),
3395 old_left_nritems = btrfs_header_nritems(left);
3396 BUG_ON(old_left_nritems <= 0);
3398 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3399 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3402 item = btrfs_item_nr(left, i);
3404 ioff = btrfs_token_item_offset(left, item, &token);
3405 btrfs_set_token_item_offset(left, item,
3406 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3409 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3411 /* fix up the right node */
3412 if (push_items > right_nritems) {
3413 printk(KERN_CRIT "push items %d nr %u\n", push_items,
3418 if (push_items < right_nritems) {
3419 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3420 leaf_data_end(root, right);
3421 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3422 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3423 btrfs_leaf_data(right) +
3424 leaf_data_end(root, right), push_space);
3426 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3427 btrfs_item_nr_offset(push_items),
3428 (btrfs_header_nritems(right) - push_items) *
3429 sizeof(struct btrfs_item));
3431 right_nritems -= push_items;
3432 btrfs_set_header_nritems(right, right_nritems);
3433 push_space = BTRFS_LEAF_DATA_SIZE(root);
3434 for (i = 0; i < right_nritems; i++) {
3435 item = btrfs_item_nr(right, i);
3437 push_space = push_space - btrfs_token_item_size(right,
3439 btrfs_set_token_item_offset(right, item, push_space, &token);
3442 btrfs_mark_buffer_dirty(left);
3444 btrfs_mark_buffer_dirty(right);
3446 clean_tree_block(trans, root, right);
3448 btrfs_item_key(right, &disk_key, 0);
3449 fixup_low_keys(trans, root, path, &disk_key, 1);
3451 /* then fix up the leaf pointer in the path */
3452 if (path->slots[0] < push_items) {
3453 path->slots[0] += old_left_nritems;
3454 btrfs_tree_unlock(path->nodes[0]);
3455 free_extent_buffer(path->nodes[0]);
3456 path->nodes[0] = left;
3457 path->slots[1] -= 1;
3459 btrfs_tree_unlock(left);
3460 free_extent_buffer(left);
3461 path->slots[0] -= push_items;
3463 BUG_ON(path->slots[0] < 0);
3466 btrfs_tree_unlock(left);
3467 free_extent_buffer(left);
3472 * push some data in the path leaf to the left, trying to free up at
3473 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3475 * max_slot can put a limit on how far into the leaf we'll push items. The
3476 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3479 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3480 *root, struct btrfs_path *path, int min_data_size,
3481 int data_size, int empty, u32 max_slot)
3483 struct extent_buffer *right = path->nodes[0];
3484 struct extent_buffer *left;
3490 slot = path->slots[1];
3493 if (!path->nodes[1])
3496 right_nritems = btrfs_header_nritems(right);
3497 if (right_nritems == 0)
3500 btrfs_assert_tree_locked(path->nodes[1]);
3502 left = read_node_slot(root, path->nodes[1], slot - 1);
3506 btrfs_tree_lock(left);
3507 btrfs_set_lock_blocking(left);
3509 free_space = btrfs_leaf_free_space(root, left);
3510 if (free_space < data_size) {
3515 /* cow and double check */
3516 ret = btrfs_cow_block(trans, root, left,
3517 path->nodes[1], slot - 1, &left);
3519 /* we hit -ENOSPC, but it isn't fatal here */
3525 free_space = btrfs_leaf_free_space(root, left);
3526 if (free_space < data_size) {
3531 return __push_leaf_left(trans, root, path, min_data_size,
3532 empty, left, free_space, right_nritems,
3535 btrfs_tree_unlock(left);
3536 free_extent_buffer(left);
3541 * split the path's leaf in two, making sure there is at least data_size
3542 * available for the resulting leaf level of the path.
3544 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3545 struct btrfs_root *root,
3546 struct btrfs_path *path,
3547 struct extent_buffer *l,
3548 struct extent_buffer *right,
3549 int slot, int mid, int nritems)
3554 struct btrfs_disk_key disk_key;
3555 struct btrfs_map_token token;
3557 btrfs_init_map_token(&token);
3559 nritems = nritems - mid;
3560 btrfs_set_header_nritems(right, nritems);
3561 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
3563 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3564 btrfs_item_nr_offset(mid),
3565 nritems * sizeof(struct btrfs_item));
3567 copy_extent_buffer(right, l,
3568 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
3569 data_copy_size, btrfs_leaf_data(l) +
3570 leaf_data_end(root, l), data_copy_size);
3572 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
3573 btrfs_item_end_nr(l, mid);
3575 for (i = 0; i < nritems; i++) {
3576 struct btrfs_item *item = btrfs_item_nr(right, i);
3579 ioff = btrfs_token_item_offset(right, item, &token);
3580 btrfs_set_token_item_offset(right, item,
3581 ioff + rt_data_off, &token);
3584 btrfs_set_header_nritems(l, mid);
3585 btrfs_item_key(right, &disk_key, 0);
3586 insert_ptr(trans, root, path, &disk_key, right->start,
3587 path->slots[1] + 1, 1, 0);
3589 btrfs_mark_buffer_dirty(right);
3590 btrfs_mark_buffer_dirty(l);
3591 BUG_ON(path->slots[0] != slot);
3594 btrfs_tree_unlock(path->nodes[0]);
3595 free_extent_buffer(path->nodes[0]);
3596 path->nodes[0] = right;
3597 path->slots[0] -= mid;
3598 path->slots[1] += 1;
3600 btrfs_tree_unlock(right);
3601 free_extent_buffer(right);
3604 BUG_ON(path->slots[0] < 0);
3608 * double splits happen when we need to insert a big item in the middle
3609 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3610 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3613 * We avoid this by trying to push the items on either side of our target
3614 * into the adjacent leaves. If all goes well we can avoid the double split
3617 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3618 struct btrfs_root *root,
3619 struct btrfs_path *path,
3627 slot = path->slots[0];
3630 * try to push all the items after our slot into the
3633 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
3640 nritems = btrfs_header_nritems(path->nodes[0]);
3642 * our goal is to get our slot at the start or end of a leaf. If
3643 * we've done so we're done
3645 if (path->slots[0] == 0 || path->slots[0] == nritems)
3648 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3651 /* try to push all the items before our slot into the previous leaf */
3652 slot = path->slots[0];
3653 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
3666 * split the path's leaf in two, making sure there is at least data_size
3667 * available for the resulting leaf level of the path.
3669 * returns 0 if all went well and < 0 on failure.
3671 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3672 struct btrfs_root *root,
3673 struct btrfs_key *ins_key,
3674 struct btrfs_path *path, int data_size,
3677 struct btrfs_disk_key disk_key;
3678 struct extent_buffer *l;
3682 struct extent_buffer *right;
3686 int num_doubles = 0;
3687 int tried_avoid_double = 0;
3690 slot = path->slots[0];
3691 if (extend && data_size + btrfs_item_size_nr(l, slot) +
3692 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
3695 /* first try to make some room by pushing left and right */
3697 wret = push_leaf_right(trans, root, path, data_size,
3702 wret = push_leaf_left(trans, root, path, data_size,
3703 data_size, 0, (u32)-1);
3709 /* did the pushes work? */
3710 if (btrfs_leaf_free_space(root, l) >= data_size)
3714 if (!path->nodes[1]) {
3715 ret = insert_new_root(trans, root, path, 1);
3722 slot = path->slots[0];
3723 nritems = btrfs_header_nritems(l);
3724 mid = (nritems + 1) / 2;
3728 leaf_space_used(l, mid, nritems - mid) + data_size >
3729 BTRFS_LEAF_DATA_SIZE(root)) {
3730 if (slot >= nritems) {
3734 if (mid != nritems &&
3735 leaf_space_used(l, mid, nritems - mid) +
3736 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3737 if (data_size && !tried_avoid_double)
3738 goto push_for_double;
3744 if (leaf_space_used(l, 0, mid) + data_size >
3745 BTRFS_LEAF_DATA_SIZE(root)) {
3746 if (!extend && data_size && slot == 0) {
3748 } else if ((extend || !data_size) && slot == 0) {
3752 if (mid != nritems &&
3753 leaf_space_used(l, mid, nritems - mid) +
3754 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3755 if (data_size && !tried_avoid_double)
3756 goto push_for_double;
3764 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3766 btrfs_item_key(l, &disk_key, mid);
3768 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
3769 root->root_key.objectid,
3770 &disk_key, 0, l->start, 0);
3772 return PTR_ERR(right);
3774 root_add_used(root, root->leafsize);
3776 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
3777 btrfs_set_header_bytenr(right, right->start);
3778 btrfs_set_header_generation(right, trans->transid);
3779 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
3780 btrfs_set_header_owner(right, root->root_key.objectid);
3781 btrfs_set_header_level(right, 0);
3782 write_extent_buffer(right, root->fs_info->fsid,
3783 (unsigned long)btrfs_header_fsid(right),
3786 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
3787 (unsigned long)btrfs_header_chunk_tree_uuid(right),
3792 btrfs_set_header_nritems(right, 0);
3793 insert_ptr(trans, root, path, &disk_key, right->start,
3794 path->slots[1] + 1, 1, 0);
3795 btrfs_tree_unlock(path->nodes[0]);
3796 free_extent_buffer(path->nodes[0]);
3797 path->nodes[0] = right;
3799 path->slots[1] += 1;
3801 btrfs_set_header_nritems(right, 0);
3802 insert_ptr(trans, root, path, &disk_key, right->start,
3803 path->slots[1], 1, 0);
3804 btrfs_tree_unlock(path->nodes[0]);
3805 free_extent_buffer(path->nodes[0]);
3806 path->nodes[0] = right;
3808 if (path->slots[1] == 0)
3809 fixup_low_keys(trans, root, path,
3812 btrfs_mark_buffer_dirty(right);
3816 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
3819 BUG_ON(num_doubles != 0);
3827 push_for_double_split(trans, root, path, data_size);
3828 tried_avoid_double = 1;
3829 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3834 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3835 struct btrfs_root *root,
3836 struct btrfs_path *path, int ins_len)
3838 struct btrfs_key key;
3839 struct extent_buffer *leaf;
3840 struct btrfs_file_extent_item *fi;
3845 leaf = path->nodes[0];
3846 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3848 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3849 key.type != BTRFS_EXTENT_CSUM_KEY);
3851 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
3854 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3855 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3856 fi = btrfs_item_ptr(leaf, path->slots[0],
3857 struct btrfs_file_extent_item);
3858 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3860 btrfs_release_path(path);
3862 path->keep_locks = 1;
3863 path->search_for_split = 1;
3864 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3865 path->search_for_split = 0;
3870 leaf = path->nodes[0];
3871 /* if our item isn't there or got smaller, return now */
3872 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3875 /* the leaf has changed, it now has room. return now */
3876 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
3879 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3880 fi = btrfs_item_ptr(leaf, path->slots[0],
3881 struct btrfs_file_extent_item);
3882 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3886 btrfs_set_path_blocking(path);
3887 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3891 path->keep_locks = 0;
3892 btrfs_unlock_up_safe(path, 1);
3895 path->keep_locks = 0;
3899 static noinline int split_item(struct btrfs_trans_handle *trans,
3900 struct btrfs_root *root,
3901 struct btrfs_path *path,
3902 struct btrfs_key *new_key,
3903 unsigned long split_offset)
3905 struct extent_buffer *leaf;
3906 struct btrfs_item *item;
3907 struct btrfs_item *new_item;
3913 struct btrfs_disk_key disk_key;
3915 leaf = path->nodes[0];
3916 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
3918 btrfs_set_path_blocking(path);
3920 item = btrfs_item_nr(leaf, path->slots[0]);
3921 orig_offset = btrfs_item_offset(leaf, item);
3922 item_size = btrfs_item_size(leaf, item);
3924 buf = kmalloc(item_size, GFP_NOFS);
3928 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3929 path->slots[0]), item_size);
3931 slot = path->slots[0] + 1;
3932 nritems = btrfs_header_nritems(leaf);
3933 if (slot != nritems) {
3934 /* shift the items */
3935 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3936 btrfs_item_nr_offset(slot),
3937 (nritems - slot) * sizeof(struct btrfs_item));
3940 btrfs_cpu_key_to_disk(&disk_key, new_key);
3941 btrfs_set_item_key(leaf, &disk_key, slot);
3943 new_item = btrfs_item_nr(leaf, slot);
3945 btrfs_set_item_offset(leaf, new_item, orig_offset);
3946 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3948 btrfs_set_item_offset(leaf, item,
3949 orig_offset + item_size - split_offset);
3950 btrfs_set_item_size(leaf, item, split_offset);
3952 btrfs_set_header_nritems(leaf, nritems + 1);
3954 /* write the data for the start of the original item */
3955 write_extent_buffer(leaf, buf,
3956 btrfs_item_ptr_offset(leaf, path->slots[0]),
3959 /* write the data for the new item */
3960 write_extent_buffer(leaf, buf + split_offset,
3961 btrfs_item_ptr_offset(leaf, slot),
3962 item_size - split_offset);
3963 btrfs_mark_buffer_dirty(leaf);
3965 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
3971 * This function splits a single item into two items,
3972 * giving 'new_key' to the new item and splitting the
3973 * old one at split_offset (from the start of the item).
3975 * The path may be released by this operation. After
3976 * the split, the path is pointing to the old item. The
3977 * new item is going to be in the same node as the old one.
3979 * Note, the item being split must be small enough to live alone on
3980 * a tree block with room for one extra struct btrfs_item
3982 * This allows us to split the item in place, keeping a lock on the
3983 * leaf the entire time.
3985 int btrfs_split_item(struct btrfs_trans_handle *trans,
3986 struct btrfs_root *root,
3987 struct btrfs_path *path,
3988 struct btrfs_key *new_key,
3989 unsigned long split_offset)
3992 ret = setup_leaf_for_split(trans, root, path,
3993 sizeof(struct btrfs_item));
3997 ret = split_item(trans, root, path, new_key, split_offset);
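/*
 * For illustration (sizes are hypothetical): splitting a 100 byte item
 * at split_offset 60 leaves the original item holding bytes [0, 60)
 * under its old key, and creates a new 40 byte item under 'new_key'
 * in the next slot of the same leaf, holding bytes [60, 100). One
 * extra struct btrfs_item header (25 bytes) is consumed, which is why
 * setup_leaf_for_split demands at least that much free space.
 */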
4002 * This function duplicates an item, giving 'new_key' to the new item.
4003 * It guarantees both items live in the same tree leaf and the new item
4004 * is contiguous with the original item.
4006 * This allows us to split a file extent in place, keeping a lock on the
4007 * leaf the entire time.
4009 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4010 struct btrfs_root *root,
4011 struct btrfs_path *path,
4012 struct btrfs_key *new_key)
4014 struct extent_buffer *leaf;
4018 leaf = path->nodes[0];
4019 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4020 ret = setup_leaf_for_split(trans, root, path,
4021 item_size + sizeof(struct btrfs_item));
4026 setup_items_for_insert(trans, root, path, new_key, &item_size,
4027 item_size, item_size +
4028 sizeof(struct btrfs_item), 1);
4029 leaf = path->nodes[0];
4030 memcpy_extent_buffer(leaf,
4031 btrfs_item_ptr_offset(leaf, path->slots[0]),
4032 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4038 * make the item pointed to by the path smaller. new_size indicates
4039 * how small to make it, and from_end tells us if we just chop bytes
4040 * off the end of the item or if we shift the item to chop bytes off
4043 void btrfs_truncate_item(struct btrfs_trans_handle *trans,
4044 struct btrfs_root *root,
4045 struct btrfs_path *path,
4046 u32 new_size, int from_end)
4049 struct extent_buffer *leaf;
4050 struct btrfs_item *item;
4052 unsigned int data_end;
4053 unsigned int old_data_start;
4054 unsigned int old_size;
4055 unsigned int size_diff;
4057 struct btrfs_map_token token;
4059 btrfs_init_map_token(&token);
4061 leaf = path->nodes[0];
4062 slot = path->slots[0];
4064 old_size = btrfs_item_size_nr(leaf, slot);
4065 if (old_size == new_size)
4068 nritems = btrfs_header_nritems(leaf);
4069 data_end = leaf_data_end(root, leaf);
4071 old_data_start = btrfs_item_offset_nr(leaf, slot);
4073 size_diff = old_size - new_size;
4076 BUG_ON(slot >= nritems);
4079 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4081 /* first correct the data pointers */
4082 for (i = slot; i < nritems; i++) {
4084 item = btrfs_item_nr(leaf, i);
4086 ioff = btrfs_token_item_offset(leaf, item, &token);
4087 btrfs_set_token_item_offset(leaf, item,
4088 ioff + size_diff, &token);
4091 /* shift the data */
4093 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4094 data_end + size_diff, btrfs_leaf_data(leaf) +
4095 data_end, old_data_start + new_size - data_end);
4097 struct btrfs_disk_key disk_key;
4100 btrfs_item_key(leaf, &disk_key, slot);
4102 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4104 struct btrfs_file_extent_item *fi;
4106 fi = btrfs_item_ptr(leaf, slot,
4107 struct btrfs_file_extent_item);
4108 fi = (struct btrfs_file_extent_item *)(
4109 (unsigned long)fi - size_diff);
4111 if (btrfs_file_extent_type(leaf, fi) ==
4112 BTRFS_FILE_EXTENT_INLINE) {
4113 ptr = btrfs_item_ptr_offset(leaf, slot);
4114 memmove_extent_buffer(leaf, ptr,
4116 offsetof(struct btrfs_file_extent_item,
4121 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4122 data_end + size_diff, btrfs_leaf_data(leaf) +
4123 data_end, old_data_start - data_end);
4125 offset = btrfs_disk_key_offset(&disk_key);
4126 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4127 btrfs_set_item_key(leaf, &disk_key, slot);
4129 fixup_low_keys(trans, root, path, &disk_key, 1);
4132 item = btrfs_item_nr(leaf, slot);
4133 btrfs_set_item_size(leaf, item, new_size);
4134 btrfs_mark_buffer_dirty(leaf);
4136 if (btrfs_leaf_free_space(root, leaf) < 0) {
4137 btrfs_print_leaf(root, leaf);
4143 * make the item pointed to by the path bigger, data_size is the size to add.
4145 void btrfs_extend_item(struct btrfs_trans_handle *trans,
4146 struct btrfs_root *root, struct btrfs_path *path,
4150 struct extent_buffer *leaf;
4151 struct btrfs_item *item;
4153 unsigned int data_end;
4154 unsigned int old_data;
4155 unsigned int old_size;
4157 struct btrfs_map_token token;
4159 btrfs_init_map_token(&token);
4161 leaf = path->nodes[0];
4163 nritems = btrfs_header_nritems(leaf);
4164 data_end = leaf_data_end(root, leaf);
4166 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4167 btrfs_print_leaf(root, leaf);
4170 slot = path->slots[0];
4171 old_data = btrfs_item_end_nr(leaf, slot);
4174 if (slot >= nritems) {
4175 btrfs_print_leaf(root, leaf);
4176 printk(KERN_CRIT "slot %d too large, nritems %d\n",
4182 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4184 /* first correct the data pointers */
4185 for (i = slot; i < nritems; i++) {
4187 item = btrfs_item_nr(leaf, i);
4189 ioff = btrfs_token_item_offset(leaf, item, &token);
4190 btrfs_set_token_item_offset(leaf, item,
4191 ioff - data_size, &token);
4194 /* shift the data */
4195 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4196 data_end - data_size, btrfs_leaf_data(leaf) +
4197 data_end, old_data - data_end);
4199 data_end = old_data;
4200 old_size = btrfs_item_size_nr(leaf, slot);
4201 item = btrfs_item_nr(leaf, slot);
4202 btrfs_set_item_size(leaf, item, old_size + data_size);
4203 btrfs_mark_buffer_dirty(leaf);
4205 if (btrfs_leaf_free_space(root, leaf) < 0) {
4206 btrfs_print_leaf(root, leaf);
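/*
 * For illustration, a hypothetical helper (not in the original file)
 * that grows the current item by ins_len bytes and zeroes the new
 * tail; btrfs_extend_item adds the bytes at the end of the item and
 * leaves them uninitialized.
 */
static void __maybe_unused example_grow_item(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     u32 ins_len)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);
	unsigned long ptr;

	/* the caller checked btrfs_leaf_free_space() >= ins_len already */
	btrfs_extend_item(trans, root, path, ins_len);

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	memset_extent_buffer(leaf, 0, ptr + old_size, ins_len);
}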
4212 * Given a key and some data, insert items into the tree.
4213 * This does all the path init required, making room in the tree if needed.
4214 * Returns the number of keys that were inserted.
4216 int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
4217 struct btrfs_root *root,
4218 struct btrfs_path *path,
4219 struct btrfs_key *cpu_key, u32 *data_size,
4222 struct extent_buffer *leaf;
4223 struct btrfs_item *item;
4230 unsigned int data_end;
4231 struct btrfs_disk_key disk_key;
4232 struct btrfs_key found_key;
4233 struct btrfs_map_token token;
4235 btrfs_init_map_token(&token);
4237 for (i = 0; i < nr; i++) {
4238 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
4239 BTRFS_LEAF_DATA_SIZE(root)) {
4243 total_data += data_size[i];
4244 total_size += data_size[i] + sizeof(struct btrfs_item);
4248 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4254 leaf = path->nodes[0];
4256 nritems = btrfs_header_nritems(leaf);
4257 data_end = leaf_data_end(root, leaf);
4259 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4260 for (i = nr - 1; i >= 0; i--) {
4261 total_data -= data_size[i];
4262 total_size -= data_size[i] + sizeof(struct btrfs_item);
4263 if (total_size < btrfs_leaf_free_space(root, leaf))
4269 slot = path->slots[0];
4272 if (slot != nritems) {
4273 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4275 item = btrfs_item_nr(leaf, slot);
4276 btrfs_item_key_to_cpu(leaf, &found_key, slot);
4278 /* figure out how many keys we can insert in here */
4279 total_data = data_size[0];
4280 for (i = 1; i < nr; i++) {
4281 if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
4283 total_data += data_size[i];
4287 if (old_data < data_end) {
4288 btrfs_print_leaf(root, leaf);
4289 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4290 slot, old_data, data_end);
4294 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4296 /* first correct the data pointers */
4297 for (i = slot; i < nritems; i++) {
4300 item = btrfs_item_nr(leaf, i);
4301 ioff = btrfs_token_item_offset(leaf, item, &token);
4302 btrfs_set_token_item_offset(leaf, item,
4303 ioff - total_data, &token);
4305 /* shift the items */
4306 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4307 btrfs_item_nr_offset(slot),
4308 (nritems - slot) * sizeof(struct btrfs_item));
4310 /* shift the data */
4311 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4312 data_end - total_data, btrfs_leaf_data(leaf) +
4313 data_end, old_data - data_end);
4314 data_end = old_data;
4317 * this sucks but it has to be done, if we are inserting at
4318 * the end of the leaf only insert 1 of the items, since we
4319 * have no way of knowing what's on the next leaf and we'd have
4320 * to drop our current locks to figure it out
4325 /* set up the items for the new data */
4326 for (i = 0; i < nr; i++) {
4327 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4328 btrfs_set_item_key(leaf, &disk_key, slot + i);
4329 item = btrfs_item_nr(leaf, slot + i);
4330 btrfs_set_token_item_offset(leaf, item,
4331 data_end - data_size[i], &token);
4332 data_end -= data_size[i];
4333 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4335 btrfs_set_header_nritems(leaf, nritems + nr);
4336 btrfs_mark_buffer_dirty(leaf);
4340 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4341 fixup_low_keys(trans, root, path, &disk_key, 1);
4344 if (btrfs_leaf_free_space(root, leaf) < 0) {
4345 btrfs_print_leaf(root, leaf);
4355 * this is a helper for btrfs_insert_empty_items, the main goal here is
4356 * to save stack depth by doing the bulk of the work in a function
4357 * that doesn't call btrfs_search_slot
4359 void setup_items_for_insert(struct btrfs_trans_handle *trans,
4360 struct btrfs_root *root, struct btrfs_path *path,
4361 struct btrfs_key *cpu_key, u32 *data_size,
4362 u32 total_data, u32 total_size, int nr)
4364 struct btrfs_item *item;
4367 unsigned int data_end;
4368 struct btrfs_disk_key disk_key;
4369 struct extent_buffer *leaf;
4371 struct btrfs_map_token token;
4373 btrfs_init_map_token(&token);
4375 leaf = path->nodes[0];
4376 slot = path->slots[0];
4378 nritems = btrfs_header_nritems(leaf);
4379 data_end = leaf_data_end(root, leaf);
4381 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4382 btrfs_print_leaf(root, leaf);
4383 printk(KERN_CRIT "not enough freespace need %u have %d\n",
4384 total_size, btrfs_leaf_free_space(root, leaf));
4388 if (slot != nritems) {
4389 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4391 if (old_data < data_end) {
4392 btrfs_print_leaf(root, leaf);
4393 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4394 slot, old_data, data_end);
4398 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4400 /* first correct the data pointers */
4401 for (i = slot; i < nritems; i++) {
4404 item = btrfs_item_nr(leaf, i);
4405 ioff = btrfs_token_item_offset(leaf, item, &token);
4406 btrfs_set_token_item_offset(leaf, item,
4407 ioff - total_data, &token);
4409 /* shift the items */
4410 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4411 btrfs_item_nr_offset(slot),
4412 (nritems - slot) * sizeof(struct btrfs_item));
4414 /* shift the data */
4415 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4416 data_end - total_data, btrfs_leaf_data(leaf) +
4417 data_end, old_data - data_end);
4418 data_end = old_data;
4421 /* set up the items for the new data */
4422 for (i = 0; i < nr; i++) {
4423 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4424 btrfs_set_item_key(leaf, &disk_key, slot + i);
4425 item = btrfs_item_nr(leaf, slot + i);
4426 btrfs_set_token_item_offset(leaf, item,
4427 data_end - data_size[i], &token);
4428 data_end -= data_size[i];
4429 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4432 btrfs_set_header_nritems(leaf, nritems + nr);
4435 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4436 fixup_low_keys(trans, root, path, &disk_key, 1);
4438 btrfs_unlock_up_safe(path, 1);
4439 btrfs_mark_buffer_dirty(leaf);
4441 if (btrfs_leaf_free_space(root, leaf) < 0) {
4442 btrfs_print_leaf(root, leaf);
4448 * Given a key and some data, insert items into the tree.
4449 * This does all the path init required, making room in the tree if needed.
4451 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4452 struct btrfs_root *root,
4453 struct btrfs_path *path,
4454 struct btrfs_key *cpu_key, u32 *data_size,
4463 for (i = 0; i < nr; i++)
4464 total_data += data_size[i];
4466 total_size = total_data + (nr * sizeof(struct btrfs_item));
4467 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4473 slot = path->slots[0];
4476 setup_items_for_insert(trans, root, path, cpu_key, data_size,
4477 total_data, total_size, nr);
4482 * Given a key and some data, insert an item into the tree.
4483 * This does all the path init required, making room in the tree if needed.
4485 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4486 *root, struct btrfs_key *cpu_key, void *data, u32
4490 struct btrfs_path *path;
4491 struct extent_buffer *leaf;
4494 path = btrfs_alloc_path();
4497 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4499 leaf = path->nodes[0];
4500 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4501 write_extent_buffer(leaf, data, ptr, data_size);
4502 btrfs_mark_buffer_dirty(leaf);
4504 btrfs_free_path(path);
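/*
 * For illustration: inserting a small blob under a made-up key. Not
 * part of the original file; the key type and payload are arbitrary.
 * btrfs_insert_item reserves room for the data plus one struct
 * btrfs_item header, copies the data in and frees the path.
 */
static int __maybe_unused example_insert(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 u64 objectid, void *data, u32 len)
{
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = BTRFS_XATTR_ITEM_KEY;	/* hypothetical choice */
	key.offset = 0;

	return btrfs_insert_item(trans, root, &key, data, len);
}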
4509 * delete the pointer from a given node.
4511 * the tree should have been previously balanced so the deletion does not
4514 static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4515 struct btrfs_path *path, int level, int slot,
4518 struct extent_buffer *parent = path->nodes[level];
4522 nritems = btrfs_header_nritems(parent);
4523 if (slot != nritems - 1) {
4524 if (tree_mod_log && level)
4525 tree_mod_log_eb_move(root->fs_info, parent, slot,
4526 slot + 1, nritems - slot - 1);
4527 memmove_extent_buffer(parent,
4528 btrfs_node_key_ptr_offset(slot),
4529 btrfs_node_key_ptr_offset(slot + 1),
4530 sizeof(struct btrfs_key_ptr) *
4531 (nritems - slot - 1));
4534 if (tree_mod_log && level) {
4535 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4536 MOD_LOG_KEY_REMOVE);
4541 btrfs_set_header_nritems(parent, nritems);
4542 if (nritems == 0 && parent == root->node) {
4543 BUG_ON(btrfs_header_level(root->node) != 1);
4544 /* just turn the root into a leaf and break */
4545 btrfs_set_header_level(root->node, 0);
4546 } else if (slot == 0) {
4547 struct btrfs_disk_key disk_key;
4549 btrfs_node_key(parent, &disk_key, 0);
4550 fixup_low_keys(trans, root, path, &disk_key, level + 1);
4552 btrfs_mark_buffer_dirty(parent);
4556 * a helper function to delete the leaf pointed to by path->slots[1] and
4559 * This deletes the pointer in path->nodes[1] and frees the leaf
4560 * block extent.
4562 * The path must have already been set up for deleting the leaf, including
4563 * all the proper balancing. path->nodes[1] must be locked.
4565 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4566 struct btrfs_root *root,
4567 struct btrfs_path *path,
4568 struct extent_buffer *leaf)
4570 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4571 del_ptr(trans, root, path, 1, path->slots[1], 1);
4574 * btrfs_free_extent is expensive, we want to make sure we
4575 * aren't holding any locks when we call it
4577 btrfs_unlock_up_safe(path, 0);
4579 root_sub_used(root, leaf->len);
4581 extent_buffer_get(leaf);
4582 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4583 free_extent_buffer_stale(leaf);
4586 * delete the items at the leaf level in path. If that empties
4587 * the leaf, remove it from the tree
4589 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4590 struct btrfs_path *path, int slot, int nr)
4592 struct extent_buffer *leaf;
4593 struct btrfs_item *item;
4600 struct btrfs_map_token token;
4602 btrfs_init_map_token(&token);
4604 leaf = path->nodes[0];
4605 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4607 for (i = 0; i < nr; i++)
4608 dsize += btrfs_item_size_nr(leaf, slot + i);
4610 nritems = btrfs_header_nritems(leaf);
4612 if (slot + nr != nritems) {
4613 int data_end = leaf_data_end(root, leaf);
4615 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4617 btrfs_leaf_data(leaf) + data_end,
4618 last_off - data_end);
4620 for (i = slot + nr; i < nritems; i++) {
4623 item = btrfs_item_nr(leaf, i);
4624 ioff = btrfs_token_item_offset(leaf, item, &token);
4625 btrfs_set_token_item_offset(leaf, item,
4626 ioff + dsize, &token);
4629 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4630 btrfs_item_nr_offset(slot + nr),
4631 sizeof(struct btrfs_item) *
4632 (nritems - slot - nr));
4634 btrfs_set_header_nritems(leaf, nritems - nr);
4637 /* delete the leaf if we've emptied it */
4639 if (leaf == root->node) {
4640 btrfs_set_header_level(leaf, 0);
4642 btrfs_set_path_blocking(path);
4643 clean_tree_block(trans, root, leaf);
4644 btrfs_del_leaf(trans, root, path, leaf);
4647 int used = leaf_space_used(leaf, 0, nritems);
4649 struct btrfs_disk_key disk_key;
4651 btrfs_item_key(leaf, &disk_key, 0);
4652 fixup_low_keys(trans, root, path, &disk_key, 1);
4655 /* delete the leaf if it is mostly empty */
4656 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
4657 /* push_leaf_left fixes the path.
4658 * make sure the path still points to our leaf
4659 * for a possible call to del_ptr below
4661 slot = path->slots[1];
4662 extent_buffer_get(leaf);
4664 btrfs_set_path_blocking(path);
4665 wret = push_leaf_left(trans, root, path, 1, 1,
4667 if (wret < 0 && wret != -ENOSPC)
4670 if (path->nodes[0] == leaf &&
4671 btrfs_header_nritems(leaf)) {
4672 wret = push_leaf_right(trans, root, path, 1,
4674 if (wret < 0 && wret != -ENOSPC)
4678 if (btrfs_header_nritems(leaf) == 0) {
4679 path->slots[1] = slot;
4680 btrfs_del_leaf(trans, root, path, leaf);
4681 free_extent_buffer(leaf);
4684 /* if we're still in the path, make sure
4685 * we're dirty. Otherwise, one of the
4686 * push_leaf functions must have already
4687 * dirtied this buffer
4689 if (path->nodes[0] == leaf)
4690 btrfs_mark_buffer_dirty(leaf);
4691 free_extent_buffer(leaf);
4694 btrfs_mark_buffer_dirty(leaf);
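/*
 * For illustration, the usual delete pattern (helper not in the
 * original file): ins_len == -1 lets btrfs_search_slot merge
 * half-empty nodes on the way down, and cow == 1 is required since
 * deletion modifies the leaf.
 */
static int __maybe_unused example_delete(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;

	btrfs_free_path(path);
	return ret;
}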
4701 * search the tree again to find a leaf with lesser keys
4702 * returns 0 if it found something or 1 if there are no lesser leaves.
4703 * returns < 0 on io errors.
4705 * This may release the path, and so you may lose any locks held at the
4708 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4710 struct btrfs_key key;
4711 struct btrfs_disk_key found_key;
4714 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4718 else if (key.type > 0)
4720 else if (key.objectid > 0)
4725 btrfs_release_path(path);
4726 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4729 btrfs_item_key(path->nodes[0], &found_key, 0);
4730 ret = comp_keys(&found_key, &key);
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that are either in cache or have a minimum
 * transaction id.  This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_key *max_key,
			 struct btrfs_path *path, int cache_only,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;

	WARN_ON(!path->keep_locks);
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the cache_only and
		 * min_trans parameters.  If it isn't in cache or is too
		 * old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 blockptr;
			u64 gen;
			struct extent_buffer *tmp;
			struct btrfs_disk_key disk_key;

			blockptr = btrfs_node_blockptr(cur, slot);
			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			if (!cache_only)
				break;

			if (max_key) {
				btrfs_node_key(cur, &disk_key, slot);
				if (comp_keys(&disk_key, max_key) >= 0) {
					ret = 1;
					goto out;
				}
			}

			tmp = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));

			if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
				free_extent_buffer(tmp);
				break;
			}
			if (tmp)
				free_extent_buffer(tmp);
			slot++;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						  cache_only, min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			unlock_up(path, level, 1, 0, NULL);
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);
		BUG_ON(!cur); /* -ENOMEM */

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
		btrfs_clear_path_blocking(path, NULL, 0);
	}
out:
	if (ret == 0)
		memcpy(min_key, &found_key, sizeof(found_key));
	btrfs_set_path_blocking(path);
	return ret;
}
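/*
 * Illustrative sketch, not part of the original ctree.c: scanning a
 * tree for items changed since 'min_trans', the way the defrag and
 * tree-log code drive btrfs_search_forward().  The function name, the
 * key bounds and the key-advance step are simplified placeholders;
 * real callers advance the full (objectid, type, offset) key.
 */
static int example_walk_newer_than(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->keep_locks = 1;	/* required by btrfs_search_forward */

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;
	max_key.objectid = (u64)-1;
	max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	while (1) {
		/* cache_only = 0: filter on generation only */
		ret = btrfs_search_forward(root, &min_key, &max_key,
					   path, 0, min_trans);
		if (ret != 0)
			break;	/* 1: nothing newer, < 0: error */

		/* ... process the item at path->slots[0] here ... */

		btrfs_release_path(path);
		/* advance past the key we just saw (simplified) */
		if (min_key.offset < (u64)-1)
			min_key.offset++;
		else
			break;
	}
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}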
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the cache_only and min_trans
 * parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level,
			int cache_only, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1]) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 blockptr = btrfs_node_blockptr(c, slot);
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (cache_only) {
				struct extent_buffer *cur;
				cur = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));
				if (!cur ||
				    btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
					slot++;
					if (cur)
						free_extent_buffer(cur);
					goto next;
				}
				free_extent_buffer(cur);
			}
			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
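/*
 * Illustrative sketch, not part of the original ctree.c: peeking at
 * the key that follows the current path position without moving the
 * path the way btrfs_next_leaf() does.  The function name is
 * hypothetical; it assumes the search that built 'path' was done with
 * path->keep_locks = 1, as the comment above requires.
 */
static int example_peek_next_key(struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_key *next_key)
{
	/* level 0, no cache_only filter, no minimum generation */
	return btrfs_find_next_key(root, path, next_key, 0, 0, 0);
}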
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	next_rw_lock = 0;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next_rw_lock = path->locks[level];
		ret = read_block_for_search(NULL, root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(NULL, root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}
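/*
 * Illustrative sketch, not part of the original ctree.c: the canonical
 * forward scan over every item in a tree, combining btrfs_search_slot()
 * with btrfs_next_leaf() whenever the current leaf is exhausted.  The
 * function name is hypothetical.
 */
static int example_scan_whole_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				break;	/* 1: end of tree, < 0: error */
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		/* ... examine the item at path->slots[0] here ... */

		path->slots[0]++;
	}
	if (ret == 1)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}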
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}
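/*
 * Illustrative sketch, not part of the original ctree.c: positioning a
 * path at the closest item of a wanted type at or before a given
 * objectid, by searching past the largest possible offset and then
 * walking backwards with btrfs_previous_item().  The function name is
 * hypothetical; this mirrors a pattern used elsewhere in btrfs.
 */
static int example_find_prev_of_type(struct btrfs_root *root, u64 objectid,
				     int type, struct btrfs_path *path)
{
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = type;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	/*
	 * (objectid, type, -1ULL) almost never matches exactly, so step
	 * back to the closest preceding item of the wanted type.
	 */
	return btrfs_previous_item(root, path, objectid, type);
}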