diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 5c84eea60703fe1239613545c1d197cf618281a3..e390451c72e6cdb93492e519cea82d5d7b3dfaf9 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -131,6 +131,16 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
+
+               /*
+                * If the tree is not empty, someone is still holding the
+                * mutex of a full_stripe_lock, which only its owner can
+                * release.  Freeing the block group now would make that
+                * later unlock a use-after-free.
+                *
+                * There is no better way to resolve this than to warn.
+                */
+               WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
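
For reference, the WARN_ON added above checks that the rb-tree of full stripe locks is empty by the time the last reference to the block group cache is dropped; any remaining entry belongs to a task that has not yet released its lock, and freeing the cache underneath it would turn that later unlock into a use-after-free. A minimal sketch of the structure being tested, assuming the layout implied by the cache->full_stripe_locks_root.root access (the authoritative definition lives in the btrfs headers):

    #include <linux/rbtree.h>
    #include <linux/mutex.h>

    /*
     * Assumed layout: an rb-tree of per-stripe lock entries plus a mutex
     * guarding the tree itself.  With this layout,
     * RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root) is true only when
     * no full stripe lock is currently held on the block group.
     */
    struct btrfs_full_stripe_locks_tree {
            struct rb_root root;
            struct mutex lock;
    };
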
@@ -892,7 +902,7 @@ search_again:
        head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
-                       atomic_inc(&head->node.refs);
+                       refcount_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);
 
                        btrfs_release_path(path);
@@ -2980,7 +2990,7 @@ again:
                                struct btrfs_delayed_ref_node *ref;
 
                                ref = &head->node;
-                               atomic_inc(&ref->refs);
+                               refcount_inc(&ref->refs);
 
                                spin_unlock(&delayed_refs->lock);
                                /*
@@ -3003,7 +3013,6 @@ again:
                goto again;
        }
 out:
-       assert_qgroups_uptodate(trans);
        trans->can_flush_pending_bgs = can_flush_pending_bgs;
        return 0;
 }
@@ -3057,7 +3066,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
        }
 
        if (!mutex_trylock(&head->mutex)) {
-               atomic_inc(&head->node.refs);
+               refcount_inc(&head->node.refs);
                spin_unlock(&delayed_refs->lock);
 
                btrfs_release_path(path);
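
The atomic_inc() to refcount_inc() changes in this and the two earlier hunks are part of converting the delayed ref head's reference count from atomic_t to refcount_t. The practical difference is that refcount_t saturates and warns on overflow and on increment-from-zero instead of silently wrapping. A self-contained sketch of the pattern, using a hypothetical demo_ref object rather than the real btrfs_delayed_ref_node:

    #include <linux/refcount.h>
    #include <linux/slab.h>

    /* Hypothetical object, for illustration only; not btrfs code. */
    struct demo_ref {
            refcount_t refs;
    };

    static struct demo_ref *demo_alloc(void)
    {
            struct demo_ref *d = kzalloc(sizeof(*d), GFP_KERNEL);

            if (d)
                    refcount_set(&d->refs, 1);      /* initial reference */
            return d;
    }

    static void demo_get(struct demo_ref *d)
    {
            /*
             * Unlike atomic_inc(), refcount_inc() WARNs if the count was
             * zero (a use-after-free pattern) and saturates instead of
             * wrapping on overflow.
             */
            refcount_inc(&d->refs);
    }

    static void demo_put(struct demo_ref *d)
    {
            if (refcount_dec_and_test(&d->refs))
                    kfree(d);
    }
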
@@ -3443,7 +3452,8 @@ again:
                /*
                 * don't bother trying to write stuff out _if_
                 * a) we're not cached,
-                * b) we're with nospace_cache mount option.
+                * b) we're with nospace_cache mount option,
+                * c) we're with v2 space_cache (FREE_SPACE_TREE).
                 */
                dcs = BTRFS_DC_WRITTEN;
                spin_unlock(&block_group->lock);
@@ -9917,6 +9927,7 @@ btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
        btrfs_init_free_space_ctl(cache);
        atomic_set(&cache->trimming, 0);
        mutex_init(&cache->free_space_lock);
+       btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
 
        return cache;
 }
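
The new initializer call pairs with the WARN_ON added in the first hunk: the full stripe locks tree must be set up when the block group cache object is created so it can later be asserted empty on the final put. A sketch of what such an initializer is expected to do, assuming the structure layout sketched earlier (the real helper is introduced elsewhere in the same series, in the scrub code):

    /* Assumed implementation: start with an empty tree and a usable mutex. */
    void btrfs_init_full_stripe_locks_tree(
                    struct btrfs_full_stripe_locks_tree *locks_root)
    {
            locks_root->root = RB_ROOT;
            mutex_init(&locks_root->lock);
    }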