f2fs: fix unlocked nat set cache operation
author Wanpeng Li <wanpeng.li@linux.intel.com>
Mon, 9 Mar 2015 03:00:55 +0000 (11:00 +0800)
committer Jaegeuk Kim <jaegeuk@kernel.org>
Fri, 10 Apr 2015 22:08:39 +0000 (15:08 -0700)
nm_i->nat_tree_lock is used to synchronize operations on both the nat entry
cache tree and the nat set cache tree; however, it is not held while flushing
nat entries during checkpoint, which leads to a potential race. This patch
fixes it by holding the lock around the gang lookup of the nat set cache and
around the deletion of items from the nat set cache.

Signed-off-by: Wanpeng Li <wanpeng.li@linux.intel.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
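
Below is a minimal user-space sketch of the locking rule the patch enforces, using a pthread rwlock in place of nm_i->nat_tree_lock and a plain array in place of the radix tree. All names here (set_cache, cache_lock, gang_lookup, flush_set) are hypothetical stand-ins for nat_set_root, nat_tree_lock, __gang_lookup_nat_set() and __flush_nat_entry_set(), not f2fs APIs; it only illustrates that both the gang lookup and the delete must run under the writer lock so no other path can modify the cache in between.

/* Hypothetical stand-alone illustration of the locking pattern; not f2fs code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_SLOTS 16

struct nat_set {                        /* stand-in for struct nat_entry_set */
	unsigned int set_no;
	int entry_cnt;
};

static struct nat_set *set_cache[CACHE_SLOTS];   /* stand-in for nat_set_root */
static pthread_rwlock_t cache_lock =
	PTHREAD_RWLOCK_INITIALIZER;              /* stand-in for nat_tree_lock */

/* Gang lookup: collect up to max populated slots, like __gang_lookup_nat_set(). */
static int gang_lookup(struct nat_set **vec, int max)
{
	int found = 0;

	for (int i = 0; i < CACHE_SLOTS && found < max; i++)
		if (set_cache[i])
			vec[found++] = set_cache[i];
	return found;
}

/* Flush one set and drop it from the cache, like __flush_nat_entry_set(). */
static void flush_set(struct nat_set *set)
{
	/* ... write back the set's entries ... */

	/* The patch takes the write lock around the delete from the cache. */
	pthread_rwlock_wrlock(&cache_lock);
	set_cache[set->set_no % CACHE_SLOTS] = NULL;
	pthread_rwlock_unlock(&cache_lock);
	free(set);
}

int main(void)
{
	struct nat_set *vec[CACHE_SLOTS];
	int found;

	for (unsigned int i = 0; i < 4; i++) {
		struct nat_set *s = malloc(sizeof(*s));

		s->set_no = i;
		s->entry_cnt = 0;
		set_cache[i] = s;
	}

	/* The gang lookup itself also runs under the write lock (second hunk). */
	pthread_rwlock_wrlock(&cache_lock);
	found = gang_lookup(vec, CACHE_SLOTS);
	pthread_rwlock_unlock(&cache_lock);

	for (int i = 0; i < found; i++)
		flush_set(vec[i]);

	printf("flushed %d sets\n", found);
	return 0;
}

As in the patch, the lookup and the delete each take the lock, while the flush work between them runs unlocked, so the lock is held only for the cache-tree operations themselves.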
fs/f2fs/node.c

index 4687eae6c11632802c7154d8a828148af0d82dbf..8ab0cf1930bd2f4a1aa9ae11827c961b204d8519 100644
@@ -1830,6 +1830,7 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
        struct f2fs_nat_block *nat_blk;
        struct nat_entry *ne, *cur;
        struct page *page = NULL;
+       struct f2fs_nm_info *nm_i = NM_I(sbi);
 
        /*
         * there are two steps to flush nat entries:
@@ -1883,7 +1884,9 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
 
        f2fs_bug_on(sbi, set->entry_cnt);
 
+       down_write(&nm_i->nat_tree_lock);
        radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
+       up_write(&nm_i->nat_tree_lock);
        kmem_cache_free(nat_entry_set_slab, set);
 }
 
@@ -1911,6 +1914,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
        if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
                remove_nats_in_journal(sbi);
 
+       down_write(&nm_i->nat_tree_lock);
        while ((found = __gang_lookup_nat_set(nm_i,
                                        set_idx, SETVEC_SIZE, setvec))) {
                unsigned idx;
@@ -1919,6 +1923,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
                        __adjust_nat_entry_set(setvec[idx], &sets,
                                                        MAX_NAT_JENTRIES(sum));
        }
+       up_write(&nm_i->nat_tree_lock);
 
        /* flush dirty nats in nat entry set */
        list_for_each_entry_safe(set, tmp, &sets, set_list)