2 * Copyright (C) 2011 STRATO. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/vmalloc.h>
24 #include "transaction.h"
25 #include "delayed-ref.h"
28 struct extent_inode_elem {
31 struct extent_inode_elem *next;
34 static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
35 struct btrfs_file_extent_item *fi,
37 struct extent_inode_elem **eie)
41 struct extent_inode_elem *e;
43 data_offset = btrfs_file_extent_offset(eb, fi);
44 data_len = btrfs_file_extent_num_bytes(eb, fi);
46 if (extent_item_pos < data_offset ||
47 extent_item_pos >= data_offset + data_len)
50 e = kmalloc(sizeof(*e), GFP_NOFS);
55 e->inum = key->objectid;
56 e->offset = key->offset + (extent_item_pos - data_offset);
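/*
 * descriptive note: e now records the (inode number, file offset) pair that
 * references the byte at extent_item_pos within this extent; it is linked
 * into the caller's list via *eie.
 */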
62 static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
64 struct extent_inode_elem **eie)
68 struct btrfs_file_extent_item *fi;
75 * from the shared data ref, we only have the leaf but we need
76 * the key. thus, we must look at all items and check whether we
77 * find one (or more) with a reference to our extent item.
79 nritems = btrfs_header_nritems(eb);
80 for (slot = 0; slot < nritems; ++slot) {
81 btrfs_item_key_to_cpu(eb, &key, slot);
82 if (key.type != BTRFS_EXTENT_DATA_KEY)
84 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
85 extent_type = btrfs_file_extent_type(eb, fi);
86 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
88 /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
89 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
90 if (disk_byte != wanted_disk_byte)
93 ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
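/*
 * descriptive note: check_extent_in_eb() appends one entry to *eie for each
 * file extent item that points at wanted_disk_byte and covers
 * extent_item_pos; the slot scan continues so that every referencing item
 * in this leaf is collected.
 */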
102 * this structure records all encountered refs on the way up to the root
104 struct __prelim_ref {
105 struct list_head list;
107 struct btrfs_key key_for_search;
110 struct extent_inode_elem *inode_list;
112 u64 wanted_disk_byte;
116 * the rules for all callers of this function are:
117 * - obtaining the parent is the goal
118 * - if you add a key, you must know that it is a correct key
119 * - if you cannot add the parent or a correct key, then we will look into the
120 * block later to set a correct key
124 * backref type | shared | indirect | shared | indirect
125 * information | tree | tree | data | data
126 * --------------------+--------+----------+--------+----------
127 * parent logical | y | - | - | -
128 * key to resolve | - | y | y | y
129 * tree block logical | - | - | - | -
130 * root for resolving | y | y | y | y
132 * - column 1: we have the parent -> done
133 * - column 2, 3, 4: we use the key to find the parent
135 * on disk refs (inline or keyed)
136 * ==============================
137 * backref type | shared | indirect | shared | indirect
138 * information | tree | tree | data | data
139 * --------------------+--------+----------+--------+----------
140 * parent logical | y | - | y | -
141 * key to resolve | - | - | - | y
142 * tree block logical | y | y | y | y
143 * root for resolving | - | y | y | y
145 * - column 1, 3: we have the parent -> done
146 * - column 2: we take the first key from the block to find the parent
147 * (see __add_missing_keys)
148 * - column 4: we use the key to find the parent
150 * additional information that's available but not required to find the parent
151 * block might help in merging entries to gain some speed.
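 *
 * illustrative example (not exhaustive): a SHARED_DATA_REF found on disk
 * already carries the parent leaf's logical address, so the ref is complete
 * as soon as it is added (column 3, "-> done"). An EXTENT_DATA_REF only
 * carries (root, objectid, offset); __resolve_indirect_ref() later walks
 * that root to find the leaf (or leaves, when snapshots share the extent)
 * actually holding the file extent item (column 4).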
154 static int __add_prelim_ref(struct list_head *head, u64 root_id,
155 struct btrfs_key *key, int level,
156 u64 parent, u64 wanted_disk_byte, int count)
158 struct __prelim_ref *ref;
160 /* in case we're adding delayed refs, we're holding the refs spinlock */
161 ref = kmalloc(sizeof(*ref), GFP_ATOMIC);
165 ref->root_id = root_id;
167 ref->key_for_search = *key;
169 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
171 ref->inode_list = NULL;
174 ref->parent = parent;
175 ref->wanted_disk_byte = wanted_disk_byte;
176 list_add_tail(&ref->list, head);
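/*
 * example call (mirrors the delayed-ref handling below, for illustration
 * only): a tree block ref without a known parent is queued as an indirect
 * ref and resolved later:
 *
 *	ret = __add_prelim_ref(prefs, ref->root, &op_key, ref->level + 1,
 *			       0, node->bytenr, node->ref_mod * sgn);
 */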
181 static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
182 struct ulist *parents, int level,
183 struct btrfs_key *key_for_search, u64 time_seq,
184 u64 wanted_disk_byte,
185 const u64 *extent_item_pos)
189 struct extent_buffer *eb;
190 struct btrfs_key key;
191 struct btrfs_file_extent_item *fi;
192 struct extent_inode_elem *eie = NULL;
196 eb = path->nodes[level];
197 ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
204 * We normally enter this function with the path already pointing to
205 * the first item to check. But sometimes, we may enter it with
206 * slot==nritems. In that case, go to the next leaf before we continue.
208 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
209 ret = btrfs_next_old_leaf(root, path, time_seq);
213 slot = path->slots[0];
215 btrfs_item_key_to_cpu(eb, &key, slot);
217 if (key.objectid != key_for_search->objectid ||
218 key.type != BTRFS_EXTENT_DATA_KEY)
221 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
222 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
224 if (disk_byte == wanted_disk_byte) {
226 if (extent_item_pos) {
227 ret = check_extent_in_eb(&key, eb, fi,
234 ret = ulist_add(parents, eb->start,
235 (uintptr_t)eie, GFP_NOFS);
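/*
 * descriptive note: the inode list built by check_extent_in_eb() is stashed
 * in the ulist node's aux field as an integer-encoded pointer; it is decoded
 * again in __resolve_indirect_refs().
 */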
238 if (!extent_item_pos) {
239 ret = btrfs_next_old_leaf(root, path,
245 ret = btrfs_next_old_item(root, path, time_seq);
254 * resolve an indirect backref in the form (root_id, key, level)
255 * to a logical address
257 static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
258 int search_commit_root,
260 struct __prelim_ref *ref,
261 struct ulist *parents,
262 const u64 *extent_item_pos)
264 struct btrfs_path *path;
265 struct btrfs_root *root;
266 struct btrfs_key root_key;
267 struct extent_buffer *eb;
270 int level = ref->level;
272 path = btrfs_alloc_path();
275 path->search_commit_root = !!search_commit_root;
277 root_key.objectid = ref->root_id;
278 root_key.type = BTRFS_ROOT_ITEM_KEY;
279 root_key.offset = (u64)-1;
280 root = btrfs_read_fs_root_no_name(fs_info, &root_key);
287 root_level = btrfs_header_level(root->node);
290 if (root_level + 1 == level)
293 path->lowest_level = level;
294 ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);
295 pr_debug("search slot in root %llu (level %d, ref count %d) returned "
296 "%d for key (%llu %u %llu)\n",
297 (unsigned long long)ref->root_id, level, ref->count, ret,
298 (unsigned long long)ref->key_for_search.objectid,
299 ref->key_for_search.type,
300 (unsigned long long)ref->key_for_search.offset);
304 eb = path->nodes[level];
312 eb = path->nodes[level];
315 ret = add_all_parents(root, path, parents, level, &ref->key_for_search,
316 time_seq, ref->wanted_disk_byte,
319 btrfs_free_path(path);
324 * resolve all indirect backrefs from the list
326 static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
327 int search_commit_root, u64 time_seq,
328 struct list_head *head,
329 const u64 *extent_item_pos)
333 struct __prelim_ref *ref;
334 struct __prelim_ref *ref_safe;
335 struct __prelim_ref *new_ref;
336 struct ulist *parents;
337 struct ulist_node *node;
338 struct ulist_iterator uiter;
340 parents = ulist_alloc(GFP_NOFS);
345 * _safe allows us to insert directly after the current item without
346 * iterating over the newly inserted items.
347 * we're also allowed to re-assign ref during iteration.
349 list_for_each_entry_safe(ref, ref_safe, head, list) {
350 if (ref->parent) /* already direct */
354 err = __resolve_indirect_ref(fs_info, search_commit_root,
355 time_seq, ref, parents,
363 /* we put the first parent into the ref at hand */
364 ULIST_ITER_INIT(&uiter);
365 node = ulist_next(parents, &uiter);
366 ref->parent = node ? node->val : 0;
367 ref->inode_list = node ?
368 (struct extent_inode_elem *)(uintptr_t)node->aux : 0;
370 /* additional parents require new refs being added here */
371 while ((node = ulist_next(parents, &uiter))) {
372 new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
377 memcpy(new_ref, ref, sizeof(*ref));
378 new_ref->parent = node->val;
379 new_ref->inode_list = (struct extent_inode_elem *)
380 (uintptr_t)node->aux;
381 list_add(&new_ref->list, &ref->list);
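/*
 * descriptive note: the clone inherits all of ref's fields; only parent and
 * inode_list are overwritten. Because it is inserted right after ref, the
 * _safe iteration above does not visit the newly added entries.
 */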
383 ulist_reinit(parents);
390 static inline int ref_for_same_block(struct __prelim_ref *ref1,
391 struct __prelim_ref *ref2)
393 if (ref1->level != ref2->level)
395 if (ref1->root_id != ref2->root_id)
397 if (ref1->key_for_search.type != ref2->key_for_search.type)
399 if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
401 if (ref1->key_for_search.offset != ref2->key_for_search.offset)
403 if (ref1->parent != ref2->parent)
410 * read tree blocks and add keys where required.
412 static int __add_missing_keys(struct btrfs_fs_info *fs_info,
413 struct list_head *head)
415 struct list_head *pos;
416 struct extent_buffer *eb;
418 list_for_each(pos, head) {
419 struct __prelim_ref *ref;
420 ref = list_entry(pos, struct __prelim_ref, list);
424 if (ref->key_for_search.type)
426 BUG_ON(!ref->wanted_disk_byte);
427 eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
428 fs_info->tree_root->leafsize, 0);
430 btrfs_tree_read_lock(eb);
431 if (btrfs_header_level(eb) == 0)
432 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
434 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
435 btrfs_tree_read_unlock(eb);
436 free_extent_buffer(eb);
442 * merge two lists of backrefs and adjust counts accordingly
444 * mode = 1: merge identical keys, if key is set
445 * FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
446 * additionally, we could even add a key range for the blocks we
447 * looked into to merge even more (-> replace unresolved refs by those
449 * mode = 2: merge identical parents
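 *
 * illustrative example: two EXTENT_DATA_REF entries for the same
 * (root, objectid, offset) are collapsed by mode 1 before resolution; after
 * __resolve_indirect_refs() has filled in parents, mode 2 collapses entries
 * that ended up pointing at the same parent block. In both cases the counts
 * are summed.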
451 static int __merge_refs(struct list_head *head, int mode)
453 struct list_head *pos1;
455 list_for_each(pos1, head) {
456 struct list_head *n2;
457 struct list_head *pos2;
458 struct __prelim_ref *ref1;
460 ref1 = list_entry(pos1, struct __prelim_ref, list);
462 for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
463 pos2 = n2, n2 = pos2->next) {
464 struct __prelim_ref *ref2;
465 struct __prelim_ref *xchg;
467 ref2 = list_entry(pos2, struct __prelim_ref, list);
470 if (!ref_for_same_block(ref1, ref2))
472 if (!ref1->parent && ref2->parent) {
477 ref1->count += ref2->count;
479 if (ref1->parent != ref2->parent)
481 ref1->count += ref2->count;
483 list_del(&ref2->list);
492 * add all currently queued delayed refs from this head whose seq nr is
493 * smaller than or equal to seq to the list
495 static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
496 struct list_head *prefs)
498 struct btrfs_delayed_extent_op *extent_op = head->extent_op;
499 struct rb_node *n = &head->node.rb_node;
500 struct btrfs_key key;
501 struct btrfs_key op_key = {0};
505 if (extent_op && extent_op->update_key)
506 btrfs_disk_key_to_cpu(&op_key, &extent_op->key);
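	/*
	 * descriptive note: delayed tree block refs do not embed the key
	 * themselves; if a pending extent_op carries an updated key we use
	 * it, otherwise op_key stays zeroed and the key is filled in later
	 * from the block (see __add_missing_keys()).
	 */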
508 while ((n = rb_prev(n))) {
509 struct btrfs_delayed_ref_node *node;
510 node = rb_entry(n, struct btrfs_delayed_ref_node,
512 if (node->bytenr != head->node.bytenr)
514 WARN_ON(node->is_head);
519 switch (node->action) {
520 case BTRFS_ADD_DELAYED_EXTENT:
521 case BTRFS_UPDATE_DELAYED_HEAD:
524 case BTRFS_ADD_DELAYED_REF:
527 case BTRFS_DROP_DELAYED_REF:
533 switch (node->type) {
534 case BTRFS_TREE_BLOCK_REF_KEY: {
535 struct btrfs_delayed_tree_ref *ref;
537 ref = btrfs_delayed_node_to_tree_ref(node);
538 ret = __add_prelim_ref(prefs, ref->root, &op_key,
539 ref->level + 1, 0, node->bytenr,
540 node->ref_mod * sgn);
543 case BTRFS_SHARED_BLOCK_REF_KEY: {
544 struct btrfs_delayed_tree_ref *ref;
546 ref = btrfs_delayed_node_to_tree_ref(node);
547 ret = __add_prelim_ref(prefs, ref->root, NULL,
548 ref->level + 1, ref->parent,
550 node->ref_mod * sgn);
553 case BTRFS_EXTENT_DATA_REF_KEY: {
554 struct btrfs_delayed_data_ref *ref;
555 ref = btrfs_delayed_node_to_data_ref(node);
557 key.objectid = ref->objectid;
558 key.type = BTRFS_EXTENT_DATA_KEY;
559 key.offset = ref->offset;
560 ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
562 node->ref_mod * sgn);
565 case BTRFS_SHARED_DATA_REF_KEY: {
566 struct btrfs_delayed_data_ref *ref;
568 ref = btrfs_delayed_node_to_data_ref(node);
570 key.objectid = ref->objectid;
571 key.type = BTRFS_EXTENT_DATA_KEY;
572 key.offset = ref->offset;
573 ret = __add_prelim_ref(prefs, ref->root, &key, 0,
574 ref->parent, node->bytenr,
575 node->ref_mod * sgn);
588 * add all inline backrefs for bytenr to the list
590 static int __add_inline_refs(struct btrfs_fs_info *fs_info,
591 struct btrfs_path *path, u64 bytenr,
592 int *info_level, struct list_head *prefs)
596 struct extent_buffer *leaf;
597 struct btrfs_key key;
600 struct btrfs_extent_item *ei;
605 * enumerate all inline refs
607 leaf = path->nodes[0];
608 slot = path->slots[0];
610 item_size = btrfs_item_size_nr(leaf, slot);
611 BUG_ON(item_size < sizeof(*ei));
613 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
614 flags = btrfs_extent_flags(leaf, ei);
616 ptr = (unsigned long)(ei + 1);
617 end = (unsigned long)ei + item_size;
619 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
620 struct btrfs_tree_block_info *info;
622 info = (struct btrfs_tree_block_info *)ptr;
623 *info_level = btrfs_tree_block_level(leaf, info);
624 ptr += sizeof(struct btrfs_tree_block_info);
627 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
631 struct btrfs_extent_inline_ref *iref;
635 iref = (struct btrfs_extent_inline_ref *)ptr;
636 type = btrfs_extent_inline_ref_type(leaf, iref);
637 offset = btrfs_extent_inline_ref_offset(leaf, iref);
640 case BTRFS_SHARED_BLOCK_REF_KEY:
641 ret = __add_prelim_ref(prefs, 0, NULL,
642 *info_level + 1, offset,
645 case BTRFS_SHARED_DATA_REF_KEY: {
646 struct btrfs_shared_data_ref *sdref;
649 sdref = (struct btrfs_shared_data_ref *)(iref + 1);
650 count = btrfs_shared_data_ref_count(leaf, sdref);
651 ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
655 case BTRFS_TREE_BLOCK_REF_KEY:
656 ret = __add_prelim_ref(prefs, offset, NULL,
660 case BTRFS_EXTENT_DATA_REF_KEY: {
661 struct btrfs_extent_data_ref *dref;
665 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
666 count = btrfs_extent_data_ref_count(leaf, dref);
667 key.objectid = btrfs_extent_data_ref_objectid(leaf,
669 key.type = BTRFS_EXTENT_DATA_KEY;
670 key.offset = btrfs_extent_data_ref_offset(leaf, dref);
671 root = btrfs_extent_data_ref_root(leaf, dref);
672 ret = __add_prelim_ref(prefs, root, &key, 0, 0,
680 ptr += btrfs_extent_inline_ref_size(type);
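		/*
		 * descriptive note: inline refs are packed back to back
		 * inside the extent item; each entry's size depends on its
		 * type, so ptr advances by the type-specific size until the
		 * end of the item is reached.
		 */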
687 * add all non-inline backrefs for bytenr to the list
689 static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
690 struct btrfs_path *path, u64 bytenr,
691 int info_level, struct list_head *prefs)
693 struct btrfs_root *extent_root = fs_info->extent_root;
696 struct extent_buffer *leaf;
697 struct btrfs_key key;
700 ret = btrfs_next_item(extent_root, path);
708 slot = path->slots[0];
709 leaf = path->nodes[0];
710 btrfs_item_key_to_cpu(leaf, &key, slot);
712 if (key.objectid != bytenr)
714 if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
716 if (key.type > BTRFS_SHARED_DATA_REF_KEY)
720 case BTRFS_SHARED_BLOCK_REF_KEY:
721 ret = __add_prelim_ref(prefs, 0, NULL,
722 info_level + 1, key.offset,
725 case BTRFS_SHARED_DATA_REF_KEY: {
726 struct btrfs_shared_data_ref *sdref;
729 sdref = btrfs_item_ptr(leaf, slot,
730 struct btrfs_shared_data_ref);
731 count = btrfs_shared_data_ref_count(leaf, sdref);
732 ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
736 case BTRFS_TREE_BLOCK_REF_KEY:
737 ret = __add_prelim_ref(prefs, key.offset, NULL,
741 case BTRFS_EXTENT_DATA_REF_KEY: {
742 struct btrfs_extent_data_ref *dref;
746 dref = btrfs_item_ptr(leaf, slot,
747 struct btrfs_extent_data_ref);
748 count = btrfs_extent_data_ref_count(leaf, dref);
749 key.objectid = btrfs_extent_data_ref_objectid(leaf,
751 key.type = BTRFS_EXTENT_DATA_KEY;
752 key.offset = btrfs_extent_data_ref_offset(leaf, dref);
753 root = btrfs_extent_data_ref_root(leaf, dref);
754 ret = __add_prelim_ref(prefs, root, &key, 0, 0,
768 * this adds all existing backrefs (inline backrefs, backrefs and delayed
769 * refs) for the given bytenr to the refs list, merges duplicates and resolves
770 * indirect refs to their parent bytenr.
771 * When roots are found, they're added to the roots list
773 * FIXME some caching might speed things up
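 *
 * rough order of operations (see the body below): collect delayed refs under
 * the ref head mutex, then on-disk inline and keyed refs; read missing keys
 * from the blocks themselves; merge identical keys (mode 1); resolve
 * indirect refs to parents; merge identical parents (mode 2); finally emit
 * roots (refs without a parent) and parent blocks, attaching the collected
 * inode lists via the ulist aux field.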
775 static int find_parent_nodes(struct btrfs_trans_handle *trans,
776 struct btrfs_fs_info *fs_info, u64 bytenr,
777 u64 time_seq, struct ulist *refs,
778 struct ulist *roots, const u64 *extent_item_pos)
780 struct btrfs_key key;
781 struct btrfs_path *path;
782 struct btrfs_delayed_ref_root *delayed_refs = NULL;
783 struct btrfs_delayed_ref_head *head;
786 int search_commit_root = (trans == BTRFS_BACKREF_SEARCH_COMMIT_ROOT);
787 struct list_head prefs_delayed;
788 struct list_head prefs;
789 struct __prelim_ref *ref;
791 INIT_LIST_HEAD(&prefs);
792 INIT_LIST_HEAD(&prefs_delayed);
794 key.objectid = bytenr;
795 key.type = BTRFS_EXTENT_ITEM_KEY;
796 key.offset = (u64)-1;
798 path = btrfs_alloc_path();
801 path->search_commit_root = !!search_commit_root;
804 * grab both a lock on the path and a lock on the delayed ref head.
805 * We need both to get a consistent picture of how the refs look
806 * at a specified point in time
811 ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
816 if (trans != BTRFS_BACKREF_SEARCH_COMMIT_ROOT) {
818 * check whether there are updates for this ref queued and lock the
821 delayed_refs = &trans->transaction->delayed_refs;
822 spin_lock(&delayed_refs->lock);
823 head = btrfs_find_delayed_ref_head(trans, bytenr);
825 if (!mutex_trylock(&head->mutex)) {
826 atomic_inc(&head->node.refs);
827 spin_unlock(&delayed_refs->lock);
829 btrfs_release_path(path);
832 * Mutex was contended, block until it's
833 * released and try again
835 mutex_lock(&head->mutex);
836 mutex_unlock(&head->mutex);
837 btrfs_put_delayed_ref(&head->node);
840 ret = __add_delayed_refs(head, time_seq,
842 mutex_unlock(&head->mutex);
844 spin_unlock(&delayed_refs->lock);
848 spin_unlock(&delayed_refs->lock);
851 if (path->slots[0]) {
852 struct extent_buffer *leaf;
856 leaf = path->nodes[0];
857 slot = path->slots[0];
858 btrfs_item_key_to_cpu(leaf, &key, slot);
859 if (key.objectid == bytenr &&
860 key.type == BTRFS_EXTENT_ITEM_KEY) {
861 ret = __add_inline_refs(fs_info, path, bytenr,
862 &info_level, &prefs);
865 ret = __add_keyed_refs(fs_info, path, bytenr,
871 btrfs_release_path(path);
873 list_splice_init(&prefs_delayed, &prefs);
875 ret = __add_missing_keys(fs_info, &prefs);
879 ret = __merge_refs(&prefs, 1);
883 ret = __resolve_indirect_refs(fs_info, search_commit_root, time_seq,
884 &prefs, extent_item_pos);
888 ret = __merge_refs(&prefs, 2);
892 while (!list_empty(&prefs)) {
893 ref = list_first_entry(&prefs, struct __prelim_ref, list);
894 list_del(&ref->list);
897 if (ref->count && ref->root_id && ref->parent == 0) {
898 /* no parent == root of tree */
899 ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
902 if (ref->count && ref->parent) {
903 struct extent_inode_elem *eie = NULL;
904 if (extent_item_pos && !ref->inode_list) {
906 struct extent_buffer *eb;
907 bsz = btrfs_level_size(fs_info->extent_root,
909 eb = read_tree_block(fs_info->extent_root,
910 ref->parent, bsz, 0);
912 ret = find_extent_in_eb(eb, bytenr,
913 *extent_item_pos, &eie);
914 ref->inode_list = eie;
915 free_extent_buffer(eb);
917 ret = ulist_add_merge(refs, ref->parent,
918 (uintptr_t)ref->inode_list,
919 (u64 *)&eie, GFP_NOFS);
920 if (!ret && extent_item_pos) {
922 * we've recorded that parent, so we must extend
923 * its inode list here
928 eie->next = ref->inode_list;
936 btrfs_free_path(path);
937 while (!list_empty(&prefs)) {
938 ref = list_first_entry(&prefs, struct __prelim_ref, list);
939 list_del(&ref->list);
942 while (!list_empty(&prefs_delayed)) {
943 ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
945 list_del(&ref->list);
952 static void free_leaf_list(struct ulist *blocks)
954 struct ulist_node *node = NULL;
955 struct extent_inode_elem *eie;
956 struct extent_inode_elem *eie_next;
957 struct ulist_iterator uiter;
959 ULIST_ITER_INIT(&uiter);
960 while ((node = ulist_next(blocks, &uiter))) {
963 eie = (struct extent_inode_elem *)(uintptr_t)node->aux;
964 for (; eie; eie = eie_next) {
965 eie_next = eie->next;
975 * Finds all leafs with a reference to the specified combination of bytenr and
976 * offset. key_list_head will point to a list of corresponding keys (caller must
977 * free each list element). The leafs will be stored in the leafs ulist, which
978 * must be freed with ulist_free.
980 * returns 0 on success, <0 on error
982 static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
983 struct btrfs_fs_info *fs_info, u64 bytenr,
984 u64 time_seq, struct ulist **leafs,
985 const u64 *extent_item_pos)
990 tmp = ulist_alloc(GFP_NOFS);
993 *leafs = ulist_alloc(GFP_NOFS);
999 ret = find_parent_nodes(trans, fs_info, bytenr,
1000 time_seq, *leafs, tmp, extent_item_pos);
1003 if (ret < 0 && ret != -ENOENT) {
1004 free_leaf_list(*leafs);
1012 * walk all backrefs for a given extent to find all roots that reference this
1013 * extent. Walking a backref means finding all extents that reference this
1014 * extent and in turn walk the backrefs of those, too. Naturally this is a
1015 * recursive process, but here it is implemented in an iterative fashion: We
1016 * find all referencing extents for the extent in question and put them on a
1017 * list. In turn, we find all referencing extents for those, further appending
1018 * to the list. The way we iterate the list allows adding more elements after
1019 * the current while iterating. The process stops when we reach the end of the
1020 * list. Found roots are added to the roots list.
1022 * returns 0 on success, < 0 on error.
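 *
 * typical use (illustrative sketch, error handling trimmed):
 *
 *	struct ulist *roots = NULL;
 *	ret = btrfs_find_all_roots(trans, fs_info, bytenr, time_seq, &roots);
 *	if (!ret) {
 *		... walk roots with ulist_next() ...
 *		ulist_free(roots);
 *	}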
1024 int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1025 struct btrfs_fs_info *fs_info, u64 bytenr,
1026 u64 time_seq, struct ulist **roots)
1029 struct ulist_node *node = NULL;
1030 struct ulist_iterator uiter;
1033 tmp = ulist_alloc(GFP_NOFS);
1036 *roots = ulist_alloc(GFP_NOFS);
1042 ULIST_ITER_INIT(&uiter);
1044 ret = find_parent_nodes(trans, fs_info, bytenr,
1045 time_seq, tmp, *roots, NULL);
1046 if (ret < 0 && ret != -ENOENT) {
1051 node = ulist_next(tmp, &uiter);
1062 static int __inode_info(u64 inum, u64 ioff, u8 key_type,
1063 struct btrfs_root *fs_root, struct btrfs_path *path,
1064 struct btrfs_key *found_key)
1067 struct btrfs_key key;
1068 struct extent_buffer *eb;
1070 key.type = key_type;
1071 key.objectid = inum;
1074 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
1078 eb = path->nodes[0];
1079 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
1080 ret = btrfs_next_leaf(fs_root, path);
1083 eb = path->nodes[0];
1086 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
1087 if (found_key->type != key.type || found_key->objectid != key.objectid)
1094 * this makes the path point to (inum INODE_ITEM ioff)
1096 int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
1097 struct btrfs_path *path)
1099 struct btrfs_key key;
1100 return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
1104 static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
1105 struct btrfs_path *path,
1106 struct btrfs_key *found_key)
1108 return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
1113 * this iterates to turn a btrfs_inode_ref into a full filesystem path. elements
1114 * of the path are separated by '/' and the path is guaranteed to be
1115 * 0-terminated. the path is only given within the current file system.
1116 * Therefore, it never starts with a '/'. the caller is responsible for providing
1117 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
1118 * the start point of the resulting string is returned. this pointer normally lies within dest.
1120 * in case the path buffer would overflow, the pointer is decremented further
1121 * as if output was written to the buffer, though no more output is actually
1122 * generated. that way, the caller can determine how much space would be
1123 * required for the path to fit into the buffer. in that case, the returned
1124 * value will be smaller than dest. callers must check this!
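 *
 * illustrative caller-side check (compare inode_to_path() below):
 *
 *	fspath = btrfs_iref_to_path(fs_root, path, iref, eb, inum,
 *				    fspath_min, bytes_left);
 *	if (IS_ERR(fspath))
 *		return PTR_ERR(fspath);
 *	if (fspath > fspath_min)
 *		... fits: fspath points at the start of the string ...
 *	else
 *		... truncated: fspath_min - fspath bytes were missing ...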
1126 char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1127 struct btrfs_inode_ref *iref,
1128 struct extent_buffer *eb_in, u64 parent,
1129 char *dest, u32 size)
1135 s64 bytes_left = size - 1;
1136 struct extent_buffer *eb = eb_in;
1137 struct btrfs_key found_key;
1138 int leave_spinning = path->leave_spinning;
1140 if (bytes_left >= 0)
1141 dest[bytes_left] = '\0';
1143 path->leave_spinning = 1;
1145 len = btrfs_inode_ref_name_len(eb, iref);
1147 if (bytes_left >= 0)
1148 read_extent_buffer(eb, dest + bytes_left,
1149 (unsigned long)(iref + 1), len);
1151 btrfs_tree_read_unlock_blocking(eb);
1152 free_extent_buffer(eb);
1154 ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
1159 next_inum = found_key.offset;
1161 /* regular exit ahead */
1162 if (parent == next_inum)
1165 slot = path->slots[0];
1166 eb = path->nodes[0];
1167 /* make sure we can use eb after releasing the path */
1169 atomic_inc(&eb->refs);
1170 btrfs_tree_read_lock(eb);
1171 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1173 btrfs_release_path(path);
1175 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1178 if (bytes_left >= 0)
1179 dest[bytes_left] = '/';
1182 btrfs_release_path(path);
1183 path->leave_spinning = leave_spinning;
1186 return ERR_PTR(ret);
1188 return dest + bytes_left;
1192 * this makes the path point to (logical EXTENT_ITEM *)
1193 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
1194 * tree blocks and <0 on error.
1196 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1197 struct btrfs_path *path, struct btrfs_key *found_key,
1203 struct extent_buffer *eb;
1204 struct btrfs_extent_item *ei;
1205 struct btrfs_key key;
1207 key.type = BTRFS_EXTENT_ITEM_KEY;
1208 key.objectid = logical;
1209 key.offset = (u64)-1;
1211 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1214 ret = btrfs_previous_item(fs_info->extent_root, path,
1215 0, BTRFS_EXTENT_ITEM_KEY);
1219 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1220 if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
1221 found_key->objectid > logical ||
1222 found_key->objectid + found_key->offset <= logical) {
1223 pr_debug("logical %llu is not within any extent\n",
1224 (unsigned long long)logical);
1228 eb = path->nodes[0];
1229 item_size = btrfs_item_size_nr(eb, path->slots[0]);
1230 BUG_ON(item_size < sizeof(*ei));
1232 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1233 flags = btrfs_extent_flags(eb, ei);
1235 pr_debug("logical %llu is at position %llu within the extent (%llu "
1236 "EXTENT_ITEM %llu) flags %#llx size %u\n",
1237 (unsigned long long)logical,
1238 (unsigned long long)(logical - found_key->objectid),
1239 (unsigned long long)found_key->objectid,
1240 (unsigned long long)found_key->offset,
1241 (unsigned long long)flags, item_size);
1243 WARN_ON(!flags_ret);
1245 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1246 *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1247 else if (flags & BTRFS_EXTENT_FLAG_DATA)
1248 *flags_ret = BTRFS_EXTENT_FLAG_DATA;
1258 * helper function to iterate extent inline refs. ptr must point to a 0 value
1259 * for the first call and may be modified. it is used to track state.
1260 * if more refs exist, 0 is returned and the next call to
1261 * __get_extent_inline_ref must pass the modified ptr parameter to get the
1262 * next ref. after the last ref was processed, 1 is returned.
1263 * returns <0 on error
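 *
 * a caller would typically loop until a non-zero value is returned
 * (illustrative sketch; eb, ei and item_size as obtained by
 * tree_backref_for_extent() below):
 *
 *	unsigned long ptr = 0;
 *	do {
 *		ret = __get_extent_inline_ref(&ptr, eb, ei, item_size,
 *					      &eiref, &type);
 *		if (ret < 0)
 *			break;
 *		... inspect eiref and type ...
 *	} while (ret == 0);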
1265 static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
1266 struct btrfs_extent_item *ei, u32 item_size,
1267 struct btrfs_extent_inline_ref **out_eiref,
1272 struct btrfs_tree_block_info *info;
1276 flags = btrfs_extent_flags(eb, ei);
1277 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1278 info = (struct btrfs_tree_block_info *)(ei + 1);
1280 (struct btrfs_extent_inline_ref *)(info + 1);
1282 *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1284 *ptr = (unsigned long)*out_eiref;
1285 if ((void *)*ptr >= (void *)ei + item_size)
1289 end = (unsigned long)ei + item_size;
1290 *out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
1291 *out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);
1293 *ptr += btrfs_extent_inline_ref_size(*out_type);
1294 WARN_ON(*ptr > end);
1296 return 1; /* last */
1302 * reads the tree block backref for an extent. tree level and root are returned
1303 * through out_level and out_root. ptr must point to a 0 value for the first
1304 * call and may be modified (see __get_extent_inline_ref comment).
1305 * returns 0 if data was provided, 1 if there was no more data to provide, or <0 on error.
1308 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1309 struct btrfs_extent_item *ei, u32 item_size,
1310 u64 *out_root, u8 *out_level)
1314 struct btrfs_tree_block_info *info;
1315 struct btrfs_extent_inline_ref *eiref;
1317 if (*ptr == (unsigned long)-1)
1321 ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
1326 if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1327 type == BTRFS_SHARED_BLOCK_REF_KEY)
1334 /* we can treat both ref types equally here */
1335 info = (struct btrfs_tree_block_info *)(ei + 1);
1336 *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1337 *out_level = btrfs_tree_block_level(eb, info);
1340 *ptr = (unsigned long)-1;
1345 static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
1346 u64 root, u64 extent_item_objectid,
1347 iterate_extent_inodes_t *iterate, void *ctx)
1349 struct extent_inode_elem *eie;
1352 for (eie = inode_list; eie; eie = eie->next) {
1353 pr_debug("ref for %llu resolved, key (%llu EXTEND_DATA %llu), "
1354 "root %llu\n", extent_item_objectid,
1355 eie->inum, eie->offset, root);
1356 ret = iterate(eie->inum, eie->offset, root, ctx);
1358 pr_debug("stopping iteration for %llu due to ret=%d\n",
1359 extent_item_objectid, ret);
1368 * calls iterate() for every inode that references the extent identified by
1369 * the given parameters.
1370 * when the iterator function returns a non-zero value, iteration stops.
1372 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1373 u64 extent_item_objectid, u64 extent_item_pos,
1374 int search_commit_root,
1375 iterate_extent_inodes_t *iterate, void *ctx)
1378 struct list_head data_refs = LIST_HEAD_INIT(data_refs);
1379 struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
1380 struct btrfs_trans_handle *trans;
1381 struct ulist *refs = NULL;
1382 struct ulist *roots = NULL;
1383 struct ulist_node *ref_node = NULL;
1384 struct ulist_node *root_node = NULL;
1385 struct seq_list tree_mod_seq_elem = {};
1386 struct ulist_iterator ref_uiter;
1387 struct ulist_iterator root_uiter;
1389 pr_debug("resolving all inodes for extent %llu\n",
1390 extent_item_objectid);
1392 if (search_commit_root) {
1393 trans = BTRFS_BACKREF_SEARCH_COMMIT_ROOT;
1395 trans = btrfs_join_transaction(fs_info->extent_root);
1397 return PTR_ERR(trans);
1398 btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1401 ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
1402 tree_mod_seq_elem.seq, &refs,
1407 ULIST_ITER_INIT(&ref_uiter);
1408 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
1409 ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
1410 tree_mod_seq_elem.seq, &roots);
1413 ULIST_ITER_INIT(&root_uiter);
1414 while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
1415 pr_debug("root %llu references leaf %llu, data list "
1416 "%#llx\n", root_node->val, ref_node->val,
1417 (long long)ref_node->aux);
1418 ret = iterate_leaf_refs((struct extent_inode_elem *)
1419 (uintptr_t)ref_node->aux,
1421 extent_item_objectid,
1428 free_leaf_list(refs);
1431 if (!search_commit_root) {
1432 btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1433 btrfs_end_transaction(trans, fs_info->extent_root);
1439 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
1440 struct btrfs_path *path,
1441 iterate_extent_inodes_t *iterate, void *ctx)
1444 u64 extent_item_pos;
1446 struct btrfs_key found_key;
1447 int search_commit_root = path->search_commit_root;
1449 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
1450 btrfs_release_path(path);
1453 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1456 extent_item_pos = logical - found_key.objectid;
1457 ret = iterate_extent_inodes(fs_info, found_key.objectid,
1458 extent_item_pos, search_commit_root,
1464 static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
1465 struct btrfs_path *path,
1466 iterate_irefs_t *iterate, void *ctx)
1475 struct extent_buffer *eb;
1476 struct btrfs_item *item;
1477 struct btrfs_inode_ref *iref;
1478 struct btrfs_key found_key;
1481 path->leave_spinning = 1;
1482 ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
1487 ret = found ? 0 : -ENOENT;
1492 parent = found_key.offset;
1493 slot = path->slots[0];
1494 eb = path->nodes[0];
1495 /* make sure we can use eb after releasing the path */
1496 atomic_inc(&eb->refs);
1497 btrfs_tree_read_lock(eb);
1498 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1499 btrfs_release_path(path);
1501 item = btrfs_item_nr(eb, slot);
1502 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1504 for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
1505 name_len = btrfs_inode_ref_name_len(eb, iref);
1506 /* path must be released before calling iterate()! */
1507 pr_debug("following ref at offset %u for inode %llu in "
1509 (unsigned long long)found_key.objectid,
1510 (unsigned long long)fs_root->objectid);
1511 ret = iterate(parent, iref, eb, ctx);
1514 len = sizeof(*iref) + name_len;
1515 iref = (struct btrfs_inode_ref *)((char *)iref + len);
1517 btrfs_tree_read_unlock_blocking(eb);
1518 free_extent_buffer(eb);
1521 btrfs_release_path(path);
1527 * returns 0 if the path could be dumped (probably truncated)
1528 * returns <0 in case of an error
1530 static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
1531 struct extent_buffer *eb, void *ctx)
1533 struct inode_fs_paths *ipath = ctx;
1536 int i = ipath->fspath->elem_cnt;
1537 const int s_ptr = sizeof(char *);
1540 bytes_left = ipath->fspath->bytes_left > s_ptr ?
1541 ipath->fspath->bytes_left - s_ptr : 0;
1543 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
1544 fspath = btrfs_iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
1545 inum, fspath_min, bytes_left);
1547 return PTR_ERR(fspath);
1549 if (fspath > fspath_min) {
1550 pr_debug("path resolved: %s\n", fspath);
1551 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
1552 ++ipath->fspath->elem_cnt;
1553 ipath->fspath->bytes_left = fspath - fspath_min;
1555 pr_debug("missed path, not enough space. missing bytes: %lu, "
1556 "constructed so far: %s\n",
1557 (unsigned long)(fspath_min - fspath), fspath_min);
1558 ++ipath->fspath->elem_missed;
1559 ipath->fspath->bytes_missing += fspath_min - fspath;
1560 ipath->fspath->bytes_left = 0;
1567 * this dumps all file system paths to the inode into the ipath struct, provided
1568 * it has been created large enough. each path is zero-terminated and accessed
1569 * from ipath->fspath->val[i].
1570 * when it returns, there are ipath->fspath->elem_cnt paths available in
1571 * ipath->fspath->val[]. when the allocated space wasn't sufficient, the
1572 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
1573 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
1574 * have been needed to return all paths.
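 *
 * typical call sequence (illustrative sketch, error handling trimmed;
 * "size" stands for the caller's chosen buffer size):
 *
 *	ipath = init_ipath(size, fs_root, path);
 *	ret = paths_from_inode(inum, ipath);
 *	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
 *		... ipath->fspath->val[i] is a 0-terminated path ...
 *	free_ipath(ipath);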
1576 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
1578 return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
1579 inode_to_path, ipath);
1582 struct btrfs_data_container *init_data_container(u32 total_bytes)
1584 struct btrfs_data_container *data;
1587 alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
1588 data = vmalloc(alloc_bytes);
1590 return ERR_PTR(-ENOMEM);
1592 if (total_bytes >= sizeof(*data)) {
1593 data->bytes_left = total_bytes - sizeof(*data);
1594 data->bytes_missing = 0;
1596 data->bytes_missing = sizeof(*data) - total_bytes;
1597 data->bytes_left = 0;
1601 data->elem_missed = 0;
1607 * allocates space to return multiple file system paths for an inode.
1608 * the number of bytes to allocate is passed in total_bytes; note that the space
1609 * usable for actual path information is total_bytes - sizeof(struct btrfs_data_container).
1610 * the returned pointer must be freed with free_ipath() in the end.
1612 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
1613 struct btrfs_path *path)
1615 struct inode_fs_paths *ifp;
1616 struct btrfs_data_container *fspath;
1618 fspath = init_data_container(total_bytes);
1620 return (void *)fspath;
1622 ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
1625 return ERR_PTR(-ENOMEM);
1628 ifp->btrfs_path = path;
1629 ifp->fspath = fspath;
1630 ifp->fs_root = fs_root;
1635 void free_ipath(struct inode_fs_paths *ipath)
1639 vfree(ipath->fspath);