diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index be5477676cc829e4efe89349fc9b7df540fd0dff..e390451c72e6cdb93492e519cea82d5d7b3dfaf9 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -131,6 +131,16 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
        if (atomic_dec_and_test(&cache->count)) {
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
+
+               /*
+                * If the tree is not empty, someone still holds the mutex
+                * of a full_stripe_lock, which only that holder can
+                * release; releasing it after this point is a guaranteed
+                * use-after-free.
+                *
+                * There is no better way to resolve this, so just warn.
+                */
+               WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
                kfree(cache->free_space_ctl);
                kfree(cache);
        }
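
The WARN_ON added here only fires when the rb-tree of full stripe locks is still populated at the moment the last block group reference is dropped; any later unlock by the holder would then touch freed memory. Below is a minimal sketch of that teardown check, assuming simplified demo_* structures that stand in for the real btrfs ones:

#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/slab.h>

/* simplified stand-in for btrfs_full_stripe_locks_tree */
struct stripe_locks_tree {
	struct rb_root root;	/* one node per locked full stripe */
	struct mutex lock;
};

struct demo_cache {
	refcount_t count;
	struct stripe_locks_tree full_stripe_locks_root;
};

/* drop one reference; free only once the last holder is gone */
static void demo_put_cache(struct demo_cache *cache)
{
	if (refcount_dec_and_test(&cache->count)) {
		/*
		 * A populated tree means some task still holds a full
		 * stripe lock; its eventual unlock would then be a
		 * use-after-free, so all we can do here is warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache);
	}
}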
@@ -316,14 +326,14 @@ get_caching_control(struct btrfs_block_group_cache *cache)
        }
 
        ctl = cache->caching_ctl;
-       atomic_inc(&ctl->count);
+       refcount_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
 }
 
 static void put_caching_control(struct btrfs_caching_control *ctl)
 {
-       if (atomic_dec_and_test(&ctl->count))
+       if (refcount_dec_and_test(&ctl->count))
                kfree(ctl);
 }
 
@@ -599,7 +609,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
-       atomic_set(&caching_ctl->count, 1);
+       refcount_set(&caching_ctl->count, 1);
        btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
                        caching_thread, NULL, NULL);
 
@@ -620,7 +630,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
                struct btrfs_caching_control *ctl;
 
                ctl = cache->caching_ctl;
-               atomic_inc(&ctl->count);
+               refcount_inc(&ctl->count);
                prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&cache->lock);
 
@@ -707,7 +717,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
        }
 
        down_write(&fs_info->commit_root_sem);
-       atomic_inc(&caching_ctl->count);
+       refcount_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->commit_root_sem);
 
@@ -892,7 +902,7 @@ search_again:
        head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
-                       atomic_inc(&head->node.refs);
+                       refcount_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);
 
                        btrfs_release_path(path);
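
This hunk (and the similar ones further down) relies on the usual pin-then-sleep idiom: the head is only guaranteed to stay allocated while delayed_refs->lock is held, so a reference is taken on head->node before the spinlock is dropped and the mutex is waited on. A sketch of that idiom, with hypothetical demo_* names standing in for the btrfs types:

#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* simplified stand-in for a delayed ref head with its embedded node */
struct demo_head {
	struct mutex mutex;
	refcount_t refs;
};

/*
 * Try to lock the head while list_lock is held.  On contention, pin the
 * head with a reference, drop the spinlock, wait for the current holder,
 * and tell the caller to drop that reference and redo the lookup.
 */
static bool demo_lock_head(struct demo_head *head, spinlock_t *list_lock)
{
	if (mutex_trylock(&head->mutex))
		return true;			/* locked; list_lock still held */

	refcount_inc(&head->refs);		/* keep head alive while sleeping */
	spin_unlock(list_lock);

	mutex_lock(&head->mutex);		/* may sleep */
	mutex_unlock(&head->mutex);
	return false;				/* caller: put the ref and retry */
}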
@@ -2980,7 +2990,7 @@ again:
                                struct btrfs_delayed_ref_node *ref;
 
                                ref = &head->node;
-                               atomic_inc(&ref->refs);
+                               refcount_inc(&ref->refs);
 
                                spin_unlock(&delayed_refs->lock);
                                /*
@@ -3003,7 +3013,6 @@ again:
                goto again;
        }
 out:
-       assert_qgroups_uptodate(trans);
        trans->can_flush_pending_bgs = can_flush_pending_bgs;
        return 0;
 }
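
The count conversions scattered through these hunks (caching_ctl->count, head->node.refs and, further down, trans->use_count) are part of the tree-wide switch from atomic_t to refcount_t, which keeps the inc/dec-and-test semantics but saturates and WARNs on overflow, underflow and increment-from-zero instead of silently wrapping. A minimal lifetime sketch, with a hypothetical demo_ctl standing in for btrfs_caching_control:

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_ctl {
	refcount_t count;
	/* ... payload ... */
};

static struct demo_ctl *demo_ctl_alloc(void)
{
	struct demo_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);

	if (ctl)
		refcount_set(&ctl->count, 1);	/* creator's reference */
	return ctl;
}

static void demo_ctl_get(struct demo_ctl *ctl)
{
	refcount_inc(&ctl->count);	/* WARNs if the count is already 0 */
}

static void demo_ctl_put(struct demo_ctl *ctl)
{
	if (refcount_dec_and_test(&ctl->count))	/* WARNs on underflow */
		kfree(ctl);
}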
@@ -3057,7 +3066,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
        }
 
        if (!mutex_trylock(&head->mutex)) {
-               atomic_inc(&head->node.refs);
+               refcount_inc(&head->node.refs);
                spin_unlock(&delayed_refs->lock);
 
                btrfs_release_path(path);
@@ -3443,7 +3452,8 @@ again:
                /*
                 * don't bother trying to write stuff out _if_
                 * a) we're not cached,
-                * b) we're with nospace_cache mount option.
+                * b) we're with nospace_cache mount option,
+                * c) we're with v2 space_cache (FREE_SPACE_TREE).
                 */
                dcs = BTRFS_DC_WRITTEN;
                spin_unlock(&block_group->lock);
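
Case c) is the new one: with the v2 space cache (the free space tree) the v1 inode-based cache is never written back, so the block group is marked BTRFS_DC_WRITTEN straight away. A sketch of the combined skip test, pulled out of cache_save_setup() into a hypothetical helper for illustration:

#include "ctree.h"	/* btrfs_fs_info, btrfs_test_opt(), btrfs_fs_compat_ro() */

/* hypothetical helper: mirrors the a)/b)/c) cases in the comment above */
static bool demo_skip_space_cache_write(struct btrfs_fs_info *fs_info,
					struct btrfs_block_group_cache *bg)
{
	return bg->cached != BTRFS_CACHE_FINISHED ||		/* a) not cached */
	       !btrfs_test_opt(fs_info, SPACE_CACHE) ||		/* b) nospace_cache */
	       btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE);	/* c) v2 space cache */
}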
@@ -9917,6 +9927,7 @@ btrfs_create_block_group_cache(struct btrfs_fs_info *fs_info,
        btrfs_init_free_space_ctl(cache);
        atomic_set(&cache->trimming, 0);
        mutex_init(&cache->free_space_lock);
+       btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
 
        return cache;
 }
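
btrfs_init_full_stripe_locks_tree() is the setup counterpart of the WARN_ON in the first hunk. Reusing the stripe_locks_tree stand-in from that earlier sketch, the initialisation presumably amounts to no more than:

/* sketch: start with an empty tree, i.e. no full stripe locked yet */
static void demo_init_stripe_locks_tree(struct stripe_locks_tree *tree)
{
	tree->root = RB_ROOT;
	mutex_init(&tree->lock);
}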
@@ -10416,7 +10427,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                                    &fs_info->caching_block_groups, list)
                                if (ctl->block_group == block_group) {
                                        caching_ctl = ctl;
-                                       atomic_inc(&caching_ctl->count);
+                                       refcount_inc(&caching_ctl->count);
                                        break;
                                }
                }
@@ -10850,7 +10861,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
                spin_lock(&fs_info->trans_lock);
                trans = fs_info->running_transaction;
                if (trans)
-                       atomic_inc(&trans->use_count);
+                       refcount_inc(&trans->use_count);
                spin_unlock(&fs_info->trans_lock);
 
                ret = find_free_dev_extent_start(trans, device, minlen, start,
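
The running transaction pointer is only stable while fs_info->trans_lock is held, so the trim path pins it with a use_count reference before dropping the lock. A sketch of that pattern, again with simplified stand-in structures:

#include <linux/refcount.h>
#include <linux/spinlock.h>

struct demo_transaction {
	refcount_t use_count;
};

struct demo_fs_info {
	spinlock_t trans_lock;
	struct demo_transaction *running_transaction;	/* may be NULL */
};

/* return the running transaction with an extra reference, or NULL */
static struct demo_transaction *demo_pin_running_trans(struct demo_fs_info *fs)
{
	struct demo_transaction *trans;

	spin_lock(&fs->trans_lock);
	trans = fs->running_transaction;
	if (trans)
		refcount_inc(&trans->use_count);	/* pin across the unlock */
	spin_unlock(&fs->trans_lock);

	return trans;	/* caller drops the reference when done */
}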