1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * Copyright (C) 2009 Oracle. All rights reserved.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public
10 * License version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
18 #include <linux/sort.h>
19 #define MLOG_MASK_PREFIX ML_REFCOUNT
20 #include <cluster/masklog.h>
28 #include "buffer_head_io.h"
29 #include "blockcheck.h"
30 #include "refcounttree.h"
33 #include "extent_map.h"
36 #include <linux/bio.h>
37 #include <linux/blkdev.h>
38 #include <linux/gfp.h>
39 #include <linux/slab.h>
40 #include <linux/writeback.h>
41 #include <linux/pagevec.h>
42 #include <linux/swap.h>
44 struct ocfs2_cow_context {
48 struct ocfs2_extent_tree di_et;
49 struct ocfs2_caching_info *ref_ci;
50 struct buffer_head *ref_root_bh;
51 struct ocfs2_alloc_context *meta_ac;
52 struct ocfs2_alloc_context *data_ac;
53 struct ocfs2_cached_dealloc_ctxt dealloc;
56 static inline struct ocfs2_refcount_tree *
57 cache_info_to_refcount(struct ocfs2_caching_info *ci)
59 return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
62 static int ocfs2_validate_refcount_block(struct super_block *sb,
63 struct buffer_head *bh)
66 struct ocfs2_refcount_block *rb =
67 (struct ocfs2_refcount_block *)bh->b_data;
69 mlog(0, "Validating refcount block %llu\n",
70 (unsigned long long)bh->b_blocknr);
72 BUG_ON(!buffer_uptodate(bh));
75 * If the ecc fails, we return the error but otherwise
76 * leave the filesystem running. We know any error is
77 * local to this block.
79 rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
81 mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
82 (unsigned long long)bh->b_blocknr);
87 if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
89 "Refcount block #%llu has bad signature %.*s",
90 (unsigned long long)bh->b_blocknr, 7,
95 if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
97 "Refcount block #%llu has an invalid rf_blkno "
99 (unsigned long long)bh->b_blocknr,
100 (unsigned long long)le64_to_cpu(rb->rf_blkno));
104 if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
106 "Refcount block #%llu has an invalid "
107 "rf_fs_generation of #%u",
108 (unsigned long long)bh->b_blocknr,
109 le32_to_cpu(rb->rf_fs_generation));
116 static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
118 struct buffer_head **bh)
121 struct buffer_head *tmp = *bh;
123 rc = ocfs2_read_block(ci, rb_blkno, &tmp,
124 ocfs2_validate_refcount_block);
126 /* If ocfs2_read_block() got us a new bh, pass it up. */
133 static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
135 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
140 static struct super_block *
141 ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
143 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
148 static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
150 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
152 spin_lock(&rf->rf_lock);
155 static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
157 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
159 spin_unlock(&rf->rf_lock);
162 static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
164 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
166 mutex_lock(&rf->rf_io_mutex);
169 static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
171 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
173 mutex_unlock(&rf->rf_io_mutex);
176 static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
177 .co_owner = ocfs2_refcount_cache_owner,
178 .co_get_super = ocfs2_refcount_cache_get_super,
179 .co_cache_lock = ocfs2_refcount_cache_lock,
180 .co_cache_unlock = ocfs2_refcount_cache_unlock,
181 .co_io_lock = ocfs2_refcount_cache_io_lock,
182 .co_io_unlock = ocfs2_refcount_cache_io_unlock,
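/*
 * Look up the cached refcount tree whose root is at @blkno in the
 * osb->osb_rf_lock_tree rb-tree.  The caller must hold osb->osb_lock.
 */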
185 static struct ocfs2_refcount_tree *
186 ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
188 struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
189 struct ocfs2_refcount_tree *tree = NULL;
192 tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);
194 if (blkno < tree->rf_blkno)
196 else if (blkno > tree->rf_blkno)
205 /* osb_lock is already locked. */
206 static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
207 struct ocfs2_refcount_tree *new)
209 u64 rf_blkno = new->rf_blkno;
210 struct rb_node *parent = NULL;
211 struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
212 struct ocfs2_refcount_tree *tmp;
217 tmp = rb_entry(parent, struct ocfs2_refcount_tree,
220 if (rf_blkno < tmp->rf_blkno)
222 else if (rf_blkno > tmp->rf_blkno)
225 /* This should never happen! */
226 mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
227 (unsigned long long)rf_blkno);
232 rb_link_node(&new->rf_node, parent, p);
233 rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
236 static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
238 ocfs2_metadata_cache_exit(&tree->rf_ci);
239 ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
240 ocfs2_lock_res_free(&tree->rf_lockres);
245 ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
246 struct ocfs2_refcount_tree *tree)
248 rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
249 if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
250 osb->osb_ref_tree_lru = NULL;
253 static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
254 struct ocfs2_refcount_tree *tree)
256 spin_lock(&osb->osb_lock);
257 ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
258 spin_unlock(&osb->osb_lock);
261 static void ocfs2_kref_remove_refcount_tree(struct kref *kref)
263 struct ocfs2_refcount_tree *tree =
264 container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);
266 ocfs2_free_refcount_tree(tree);
270 ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
272 kref_get(&tree->rf_getcnt);
276 ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
278 kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
281 static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
282 struct super_block *sb)
284 ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
285 mutex_init(&new->rf_io_mutex);
287 spin_lock_init(&new->rf_lock);
290 static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
291 struct ocfs2_refcount_tree *new,
292 u64 rf_blkno, u32 generation)
294 init_rwsem(&new->rf_sem);
295 ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
296 rf_blkno, generation);
299 static struct ocfs2_refcount_tree*
300 ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
302 struct ocfs2_refcount_tree *new;
304 new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
308 new->rf_blkno = rf_blkno;
309 kref_init(&new->rf_getcnt);
310 ocfs2_init_refcount_tree_ci(new, osb->sb);
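/*
 * Find the cached refcount tree for @rf_blkno, checking the single-entry
 * LRU slot first.  If it isn't cached yet, allocate a new tree, read the
 * root block to pick up the generation, then recheck under osb_lock and
 * insert it (or free it if another thread raced us and inserted first).
 */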
315 static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
316 struct ocfs2_refcount_tree **ret_tree)
319 struct ocfs2_refcount_tree *tree, *new = NULL;
320 struct buffer_head *ref_root_bh = NULL;
321 struct ocfs2_refcount_block *ref_rb;
323 spin_lock(&osb->osb_lock);
324 if (osb->osb_ref_tree_lru &&
325 osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
326 tree = osb->osb_ref_tree_lru;
328 tree = ocfs2_find_refcount_tree(osb, rf_blkno);
332 spin_unlock(&osb->osb_lock);
334 new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
341 * We need the generation to create the refcount tree lock and since
342 * it isn't changed during tree modification, we are safe to read it
343 * here without protection.
344 * We also have to purge the cache after we create the lock since the
345 * refcount block may contain stale data. It can only be trusted when
346 * we hold the refcount lock.
348 ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
351 ocfs2_metadata_cache_exit(&new->rf_ci);
356 ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
357 new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
358 ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
360 ocfs2_metadata_cache_purge(&new->rf_ci);
362 spin_lock(&osb->osb_lock);
363 tree = ocfs2_find_refcount_tree(osb, rf_blkno);
367 ocfs2_insert_refcount_tree(osb, new);
375 osb->osb_ref_tree_lru = tree;
377 spin_unlock(&osb->osb_lock);
380 ocfs2_free_refcount_tree(new);
386 static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
389 struct buffer_head *di_bh = NULL;
390 struct ocfs2_dinode *di;
392 ret = ocfs2_read_inode_block(inode, &di_bh);
398 BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
400 di = (struct ocfs2_dinode *)di_bh->b_data;
401 *ref_blkno = le64_to_cpu(di->i_refcount_loc);
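/*
 * Take the cluster lock on the refcount tree and then rf_sem in the
 * mode requested by rw (write if rw is set, read otherwise).
 */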
407 static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
408 struct ocfs2_refcount_tree *tree, int rw)
412 ret = ocfs2_refcount_lock(tree, rw);
419 down_write(&tree->rf_sem);
421 down_read(&tree->rf_sem);
428 * Lock the refcount tree pointed to by ref_blkno and return the tree.
429 * In most cases, we lock the tree and read the refcount block.
430 * So read it here if the caller really needs it.
432 * If the tree has been re-created by another node, we will free the
433 * old one and re-create it.
435 int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
436 u64 ref_blkno, int rw,
437 struct ocfs2_refcount_tree **ret_tree,
438 struct buffer_head **ref_bh)
440 int ret, delete_tree = 0;
441 struct ocfs2_refcount_tree *tree = NULL;
442 struct buffer_head *ref_root_bh = NULL;
443 struct ocfs2_refcount_block *rb;
446 ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
452 ocfs2_refcount_tree_get(tree);
454 ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
457 ocfs2_refcount_tree_put(tree);
461 ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
465 ocfs2_unlock_refcount_tree(osb, tree, rw);
466 ocfs2_refcount_tree_put(tree);
470 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
472 * If the refcount block has been freed and re-created, we may need
473 * to recreate the refcount tree also.
475 * Here we just remove the tree from the rb-tree, and the last
476 * kref holder will unlock and delete this refcount_tree.
477 * Then we goto "again" and ocfs2_get_refcount_tree will create
478 * the new refcount tree for us.
480 if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
481 if (!tree->rf_removed) {
482 ocfs2_erase_refcount_tree_from_list(osb, tree);
483 tree->rf_removed = 1;
487 ocfs2_unlock_refcount_tree(osb, tree, rw);
489 * We get an extra reference when we create the refcount
490 * tree, so another put will destroy it.
493 ocfs2_refcount_tree_put(tree);
501 *ref_bh = ref_root_bh;
509 int ocfs2_lock_refcount_tree_by_inode(struct inode *inode, int rw,
510 struct ocfs2_refcount_tree **ret_tree,
511 struct buffer_head **ref_bh)
516 ret = ocfs2_get_refcount_block(inode, &ref_blkno);
522 return ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno,
523 rw, ret_tree, ref_bh);
526 void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
527 struct ocfs2_refcount_tree *tree, int rw)
530 up_write(&tree->rf_sem);
532 up_read(&tree->rf_sem);
534 ocfs2_refcount_unlock(tree, rw);
535 ocfs2_refcount_tree_put(tree);
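/*
 * Tear down every cached refcount tree hanging off this ocfs2_super,
 * walking osb_rf_lock_tree from the end and freeing each entry.
 */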
538 void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
540 struct rb_node *node;
541 struct ocfs2_refcount_tree *tree;
542 struct rb_root *root = &osb->osb_rf_lock_tree;
544 while ((node = rb_last(root)) != NULL) {
545 tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);
547 mlog(0, "Purge tree %llu\n",
548 (unsigned long long) tree->rf_blkno);
550 rb_erase(&tree->rf_node, root);
551 ocfs2_free_refcount_tree(tree);
556 * Create a refcount tree for an inode.
557 * We take for granted that the inode is already locked.
559 static int ocfs2_create_refcount_tree(struct inode *inode,
560 struct buffer_head *di_bh)
563 handle_t *handle = NULL;
564 struct ocfs2_alloc_context *meta_ac = NULL;
565 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
566 struct ocfs2_inode_info *oi = OCFS2_I(inode);
567 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
568 struct buffer_head *new_bh = NULL;
569 struct ocfs2_refcount_block *rb;
570 struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
571 u16 suballoc_bit_start;
575 BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
577 mlog(0, "create tree for inode %lu\n", inode->i_ino);
579 ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
585 handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
586 if (IS_ERR(handle)) {
587 ret = PTR_ERR(handle);
592 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
593 OCFS2_JOURNAL_ACCESS_WRITE);
599 ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1,
600 &suballoc_bit_start, &num_got,
607 new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
614 new_bh = sb_getblk(inode->i_sb, first_blkno);
615 ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);
617 ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
618 OCFS2_JOURNAL_ACCESS_CREATE);
624 /* Initialize ocfs2_refcount_block. */
625 rb = (struct ocfs2_refcount_block *)new_bh->b_data;
626 memset(rb, 0, inode->i_sb->s_blocksize);
627 strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
628 rb->rf_suballoc_slot = cpu_to_le16(osb->slot_num);
629 rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
630 rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
631 rb->rf_blkno = cpu_to_le64(first_blkno);
632 rb->rf_count = cpu_to_le32(1);
633 rb->rf_records.rl_count =
634 cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
635 spin_lock(&osb->osb_lock);
636 rb->rf_generation = osb->s_next_generation++;
637 spin_unlock(&osb->osb_lock);
639 ocfs2_journal_dirty(handle, new_bh);
641 spin_lock(&oi->ip_lock);
642 oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
643 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
644 di->i_refcount_loc = cpu_to_le64(first_blkno);
645 spin_unlock(&oi->ip_lock);
647 mlog(0, "created tree for inode %lu, refblock %llu\n",
648 inode->i_ino, (unsigned long long)first_blkno);
650 ocfs2_journal_dirty(handle, di_bh);
653 * We have to init the tree lock here since it will use
654 * the generation number to create it.
656 new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
657 ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
658 new_tree->rf_generation);
660 spin_lock(&osb->osb_lock);
661 tree = ocfs2_find_refcount_tree(osb, first_blkno);
664 * We've just created a new refcount tree in this block. If
665 * we found a refcount tree on the ocfs2_super, it must be
666 * one we just deleted. We free the old tree before
667 * inserting the new tree.
669 BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
671 ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
672 ocfs2_insert_refcount_tree(osb, new_tree);
673 spin_unlock(&osb->osb_lock);
676 ocfs2_refcount_tree_put(tree);
679 ocfs2_commit_trans(osb, handle);
683 ocfs2_metadata_cache_exit(&new_tree->rf_ci);
689 ocfs2_free_alloc_context(meta_ac);
694 static int ocfs2_set_refcount_tree(struct inode *inode,
695 struct buffer_head *di_bh,
699 handle_t *handle = NULL;
700 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
701 struct ocfs2_inode_info *oi = OCFS2_I(inode);
702 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
703 struct buffer_head *ref_root_bh = NULL;
704 struct ocfs2_refcount_block *rb;
705 struct ocfs2_refcount_tree *ref_tree;
707 BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
709 ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
710 &ref_tree, &ref_root_bh);
716 handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
717 if (IS_ERR(handle)) {
718 ret = PTR_ERR(handle);
723 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
724 OCFS2_JOURNAL_ACCESS_WRITE);
730 ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
731 OCFS2_JOURNAL_ACCESS_WRITE);
737 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
738 le32_add_cpu(&rb->rf_count, 1);
740 ocfs2_journal_dirty(handle, ref_root_bh);
742 spin_lock(&oi->ip_lock);
743 oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
744 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
745 di->i_refcount_loc = cpu_to_le64(refcount_loc);
746 spin_unlock(&oi->ip_lock);
747 ocfs2_journal_dirty(handle, di_bh);
750 ocfs2_commit_trans(osb, handle);
752 ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
758 int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
760 int ret, delete_tree = 0;
761 handle_t *handle = NULL;
762 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
763 struct ocfs2_inode_info *oi = OCFS2_I(inode);
764 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
765 struct ocfs2_refcount_block *rb;
766 struct inode *alloc_inode = NULL;
767 struct buffer_head *alloc_bh = NULL;
768 struct buffer_head *blk_bh = NULL;
769 struct ocfs2_refcount_tree *ref_tree;
770 int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
771 u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
774 if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL))
778 ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
784 rb = (struct ocfs2_refcount_block *)blk_bh->b_data;
787 * If we are the last user, we need to free the block.
788 * So lock the allocator ahead of time.
790 if (le32_to_cpu(rb->rf_count) == 1) {
791 blk = le64_to_cpu(rb->rf_blkno);
792 bit = le16_to_cpu(rb->rf_suballoc_bit);
793 bg_blkno = ocfs2_which_suballoc_group(blk, bit);
795 alloc_inode = ocfs2_get_system_file_inode(osb,
796 EXTENT_ALLOC_SYSTEM_INODE,
797 le16_to_cpu(rb->rf_suballoc_slot));
803 mutex_lock(&alloc_inode->i_mutex);
805 ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
811 credits += OCFS2_SUBALLOC_FREE;
814 handle = ocfs2_start_trans(osb, credits);
815 if (IS_ERR(handle)) {
816 ret = PTR_ERR(handle);
821 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
822 OCFS2_JOURNAL_ACCESS_WRITE);
828 ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
829 OCFS2_JOURNAL_ACCESS_WRITE);
835 spin_lock(&oi->ip_lock);
836 oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
837 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
838 di->i_refcount_loc = 0;
839 spin_unlock(&oi->ip_lock);
840 ocfs2_journal_dirty(handle, di_bh);
842 le32_add_cpu(&rb->rf_count, -1);
843 ocfs2_journal_dirty(handle, blk_bh);
847 ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
848 ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
849 alloc_bh, bit, bg_blkno, 1);
855 ocfs2_commit_trans(osb, handle);
858 ocfs2_inode_unlock(alloc_inode, 1);
863 mutex_unlock(&alloc_inode->i_mutex);
867 ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
869 ocfs2_refcount_tree_put(ref_tree);
875 static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
876 struct buffer_head *ref_leaf_bh,
877 u64 cpos, unsigned int len,
878 struct ocfs2_refcount_rec *ret_rec,
882 struct ocfs2_refcount_block *rb =
883 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
884 struct ocfs2_refcount_rec *rec = NULL;
886 for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
887 rec = &rb->rf_records.rl_recs[i];
889 if (le64_to_cpu(rec->r_cpos) +
890 le32_to_cpu(rec->r_clusters) <= cpos)
892 else if (le64_to_cpu(rec->r_cpos) > cpos)
895 /* ok, cpos falls in this rec. Just return. */
902 /* We hit a hole here, so fake the rec. */
903 ret_rec->r_cpos = cpu_to_le64(cpos);
904 ret_rec->r_refcount = 0;
905 if (i < le16_to_cpu(rb->rf_records.rl_used) &&
906 le64_to_cpu(rec->r_cpos) < cpos + len)
907 ret_rec->r_clusters =
908 cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
910 ret_rec->r_clusters = cpu_to_le32(len);
918 * Given a cpos and len, try to find the refcount record which contains cpos.
919 * 1. If cpos can be found in one refcount record, return the record.
920 * 2. If cpos can't be found, return a fake record which starts from cpos
921 * and ends at a small value between cpos+len and the start of the next record.
922 * This fake record has r_refcount = 0.
924 static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
925 struct buffer_head *ref_root_bh,
926 u64 cpos, unsigned int len,
927 struct ocfs2_refcount_rec *ret_rec,
929 struct buffer_head **ret_bh)
931 int ret = 0, i, found;
933 struct ocfs2_extent_list *el;
934 struct ocfs2_extent_rec *tmp, *rec = NULL;
935 struct ocfs2_extent_block *eb;
936 struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
937 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
938 struct ocfs2_refcount_block *rb =
939 (struct ocfs2_refcount_block *)ref_root_bh->b_data;
941 if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
942 ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
944 *ret_bh = ref_root_bh;
950 low_cpos = cpos & OCFS2_32BIT_POS_MASK;
952 if (el->l_tree_depth) {
953 ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
959 eb = (struct ocfs2_extent_block *) eb_bh->b_data;
962 if (el->l_tree_depth) {
964 "refcount tree %llu has non-zero tree "
965 "depth in leaf btree block %llu\n",
966 (unsigned long long)ocfs2_metadata_cache_owner(ci),
967 (unsigned long long)eb_bh->b_blocknr);
974 for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
975 rec = &el->l_recs[i];
977 if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
983 /* adjust len when we have an ocfs2_extent_rec after it. */
984 if (found && i < le16_to_cpu(el->l_next_free_rec) - 1) {
985 tmp = &el->l_recs[i+1];
987 if (le32_to_cpu(tmp->e_cpos) < cpos + len)
988 len = le32_to_cpu(tmp->e_cpos) - cpos;
991 ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
998 ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
1000 *ret_bh = ref_leaf_bh;
1006 enum ocfs2_ref_rec_contig {
1007 REF_CONTIG_NONE = 0,
1010 REF_CONTIG_LEFTRIGHT,
1013 static enum ocfs2_ref_rec_contig
1014 ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
1017 if ((rb->rf_records.rl_recs[index].r_refcount ==
1018 rb->rf_records.rl_recs[index + 1].r_refcount) &&
1019 (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
1020 le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
1021 le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
1022 return REF_CONTIG_RIGHT;
1024 return REF_CONTIG_NONE;
1027 static enum ocfs2_ref_rec_contig
1028 ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
1031 enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;
1033 if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
1034 ret = ocfs2_refcount_rec_adjacent(rb, index);
1037 enum ocfs2_ref_rec_contig tmp;
1039 tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);
1041 if (tmp == REF_CONTIG_RIGHT) {
1042 if (ret == REF_CONTIG_RIGHT)
1043 ret = REF_CONTIG_LEFTRIGHT;
1045 ret = REF_CONTIG_LEFT;
1052 static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
1055 BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
1056 rb->rf_records.rl_recs[index+1].r_refcount);
1058 le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
1059 le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));
1061 if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
1062 memmove(&rb->rf_records.rl_recs[index + 1],
1063 &rb->rf_records.rl_recs[index + 2],
1064 sizeof(struct ocfs2_refcount_rec) *
1065 (le16_to_cpu(rb->rf_records.rl_used) - index - 2));
1067 memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
1068 0, sizeof(struct ocfs2_refcount_rec));
1069 le16_add_cpu(&rb->rf_records.rl_used, -1);
1073 * Merge the refcount rec if we are contiguous with the adjacent recs.
1075 static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
1078 enum ocfs2_ref_rec_contig contig =
1079 ocfs2_refcount_rec_contig(rb, index);
1081 if (contig == REF_CONTIG_NONE)
1084 if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
1089 ocfs2_rotate_refcount_rec_left(rb, index);
1091 if (contig == REF_CONTIG_LEFTRIGHT)
1092 ocfs2_rotate_refcount_rec_left(rb, index);
1096 * Change the refcount indexed by "index" in ref_bh.
1097 * If refcount reaches 0, remove it.
1099 static int ocfs2_change_refcount_rec(handle_t *handle,
1100 struct ocfs2_caching_info *ci,
1101 struct buffer_head *ref_leaf_bh,
1102 int index, int change)
1105 struct ocfs2_refcount_block *rb =
1106 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1107 struct ocfs2_refcount_list *rl = &rb->rf_records;
1108 struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];
1110 ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1111 OCFS2_JOURNAL_ACCESS_WRITE);
1117 mlog(0, "change index %d, old count %u, change %d\n", index,
1118 le32_to_cpu(rec->r_refcount), change);
1119 le32_add_cpu(&rec->r_refcount, change);
1121 if (!rec->r_refcount) {
1122 if (index != le16_to_cpu(rl->rl_used) - 1) {
1123 memmove(rec, rec + 1,
1124 (le16_to_cpu(rl->rl_used) - index - 1) *
1125 sizeof(struct ocfs2_refcount_rec));
1126 memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
1127 0, sizeof(struct ocfs2_refcount_rec));
1130 le16_add_cpu(&rl->rl_used, -1);
1132 ocfs2_refcount_rec_merge(rb, index);
1134 ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
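/*
 * Turn an inline refcount root into a one-level b-tree: copy the root's
 * contents into a newly allocated leaf block and rewrite the root as an
 * extent list pointing at that leaf.
 */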
1141 static int ocfs2_expand_inline_ref_root(handle_t *handle,
1142 struct ocfs2_caching_info *ci,
1143 struct buffer_head *ref_root_bh,
1144 struct buffer_head **ref_leaf_bh,
1145 struct ocfs2_alloc_context *meta_ac)
1148 u16 suballoc_bit_start;
1151 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
1152 struct buffer_head *new_bh = NULL;
1153 struct ocfs2_refcount_block *new_rb;
1154 struct ocfs2_refcount_block *root_rb =
1155 (struct ocfs2_refcount_block *)ref_root_bh->b_data;
1157 ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
1158 OCFS2_JOURNAL_ACCESS_WRITE);
1164 ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1,
1165 &suballoc_bit_start, &num_got,
1172 new_bh = sb_getblk(sb, blkno);
1173 if (new_bh == NULL) {
1178 ocfs2_set_new_buffer_uptodate(ci, new_bh);
1180 ret = ocfs2_journal_access_rb(handle, ci, new_bh,
1181 OCFS2_JOURNAL_ACCESS_CREATE);
1188 * Initialize ocfs2_refcount_block.
1189 * It should contain the same information as the old root,
1190 * so just memcpy it and change the corresponding fields.
1192 memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);
1194 new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
1195 new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
1196 new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
1197 new_rb->rf_blkno = cpu_to_le64(blkno);
1198 new_rb->rf_cpos = cpu_to_le32(0);
1199 new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
1200 new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
1201 ocfs2_journal_dirty(handle, new_bh);
1203 /* Now change the root. */
1204 memset(&root_rb->rf_list, 0, sb->s_blocksize -
1205 offsetof(struct ocfs2_refcount_block, rf_list));
1206 root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
1207 root_rb->rf_clusters = cpu_to_le32(1);
1208 root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
1209 root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
1210 root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
1211 root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);
1213 ocfs2_journal_dirty(handle, ref_root_bh);
1215 mlog(0, "new leaf block %llu, used %u\n", (unsigned long long)blkno,
1216 le16_to_cpu(new_rb->rf_records.rl_used));
1218 *ref_leaf_bh = new_bh;
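/*
 * Helpers for sort(): ocfs2_divide_leaf_refcount_block() sorts the
 * records first by their low 32-bit cpos to find a split point, and
 * then by their full 64-bit cpos before dirtying the blocks.
 */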
1225 static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
1226 struct ocfs2_refcount_rec *next)
1228 if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
1229 ocfs2_get_ref_rec_low_cpos(next))
1235 static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
1237 const struct ocfs2_refcount_rec *l = a, *r = b;
1238 u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
1239 u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);
1241 if (l_cpos > r_cpos)
1243 if (l_cpos < r_cpos)
1248 static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
1250 const struct ocfs2_refcount_rec *l = a, *r = b;
1251 u64 l_cpos = le64_to_cpu(l->r_cpos);
1252 u64 r_cpos = le64_to_cpu(r->r_cpos);
1254 if (l_cpos > r_cpos)
1256 if (l_cpos < r_cpos)
1261 static void swap_refcount_rec(void *a, void *b, int size)
1263 struct ocfs2_refcount_rec *l = a, *r = b, tmp;
1265 tmp = *(struct ocfs2_refcount_rec *)l;
1266 *(struct ocfs2_refcount_rec *)l =
1267 *(struct ocfs2_refcount_rec *)r;
1268 *(struct ocfs2_refcount_rec *)r = tmp;
1272 * The refcount recs are ordered by their 64-bit cpos,
1273 * but we will use the low 32 bits as the e_cpos in the b-tree.
1274 * So we need to make sure that this pos doesn't intersect with others.
1276 * Note: The refcount recs are already sorted by their low 32-bit cpos,
1277 * so just try the middle pos first, and we will exit when we find
1278 * a good position.
1280 static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
1281 u32 *split_pos, int *split_index)
1283 int num_used = le16_to_cpu(rl->rl_used);
1284 int delta, middle = num_used / 2;
1286 for (delta = 0; delta < middle; delta++) {
1287 /* Let's check delta earlier than middle */
1288 if (ocfs2_refcount_rec_no_intersect(
1289 &rl->rl_recs[middle - delta - 1],
1290 &rl->rl_recs[middle - delta])) {
1291 *split_index = middle - delta;
1295 /* For even counts, don't walk off the end */
1296 if ((middle + delta + 1) == num_used)
1299 /* Now try delta past middle */
1300 if (ocfs2_refcount_rec_no_intersect(
1301 &rl->rl_recs[middle + delta],
1302 &rl->rl_recs[middle + delta + 1])) {
1303 *split_index = middle + delta + 1;
1308 if (delta >= middle)
1311 *split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
1315 static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
1316 struct buffer_head *new_bh,
1319 int split_index = 0, num_moved, ret;
1321 struct ocfs2_refcount_block *rb =
1322 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1323 struct ocfs2_refcount_list *rl = &rb->rf_records;
1324 struct ocfs2_refcount_block *new_rb =
1325 (struct ocfs2_refcount_block *)new_bh->b_data;
1326 struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;
1328 mlog(0, "split old leaf refcount block %llu, count = %u, used = %u\n",
1329 (unsigned long long)ref_leaf_bh->b_blocknr,
1330 le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
1333 * XXX: Improvement later.
1334 * If we know all the high 32 bits of the cpos are the same, there is no need to sort.
1336 * In order to make the whole process safe, we do:
1337 * 1. sort the entries by their low 32 bit cpos first so that we can
1338 * find the split cpos easily.
1339 * 2. call ocfs2_insert_extent to insert the new refcount block.
1340 * 3. move the refcount rec to the new block.
1341 * 4. sort the entries by their 64 bit cpos.
1342 * 5. dirty the new_rb and rb.
1344 sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
1345 sizeof(struct ocfs2_refcount_rec),
1346 cmp_refcount_rec_by_low_cpos, swap_refcount_rec);
1348 ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
1354 new_rb->rf_cpos = cpu_to_le32(cpos);
1356 /* move refcount records starting from split_index to the new block. */
1357 num_moved = le16_to_cpu(rl->rl_used) - split_index;
1358 memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
1359 num_moved * sizeof(struct ocfs2_refcount_rec));
1361 /* ok, remove the entries we just moved over to the other block. */
1362 memset(&rl->rl_recs[split_index], 0,
1363 num_moved * sizeof(struct ocfs2_refcount_rec));
1365 /* change old and new rl_used accordingly. */
1366 le16_add_cpu(&rl->rl_used, -num_moved);
1367 new_rl->rl_used = cpu_to_le16(num_moved);
1369 sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
1370 sizeof(struct ocfs2_refcount_rec),
1371 cmp_refcount_rec_by_cpos, swap_refcount_rec);
1373 sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
1374 sizeof(struct ocfs2_refcount_rec),
1375 cmp_refcount_rec_by_cpos, swap_refcount_rec);
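/*
 * Allocate a new leaf refcount block, move roughly half of the records
 * from the full leaf into it, and insert the new leaf into the refcount
 * b-tree at the cpos chosen by ocfs2_divide_leaf_refcount_block().
 */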
1381 static int ocfs2_new_leaf_refcount_block(handle_t *handle,
1382 struct ocfs2_caching_info *ci,
1383 struct buffer_head *ref_root_bh,
1384 struct buffer_head *ref_leaf_bh,
1385 struct ocfs2_alloc_context *meta_ac)
1388 u16 suballoc_bit_start;
1389 u32 num_got, new_cpos;
1391 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
1392 struct ocfs2_refcount_block *root_rb =
1393 (struct ocfs2_refcount_block *)ref_root_bh->b_data;
1394 struct buffer_head *new_bh = NULL;
1395 struct ocfs2_refcount_block *new_rb;
1396 struct ocfs2_extent_tree ref_et;
1398 BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));
1400 ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
1401 OCFS2_JOURNAL_ACCESS_WRITE);
1407 ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1408 OCFS2_JOURNAL_ACCESS_WRITE);
1414 ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1,
1415 &suballoc_bit_start, &num_got,
1422 new_bh = sb_getblk(sb, blkno);
1423 if (new_bh == NULL) {
1428 ocfs2_set_new_buffer_uptodate(ci, new_bh);
1430 ret = ocfs2_journal_access_rb(handle, ci, new_bh,
1431 OCFS2_JOURNAL_ACCESS_CREATE);
1437 /* Initialize ocfs2_refcount_block. */
1438 new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
1439 memset(new_rb, 0, sb->s_blocksize);
1440 strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
1441 new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
1442 new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
1443 new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
1444 new_rb->rf_blkno = cpu_to_le64(blkno);
1445 new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
1446 new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
1447 new_rb->rf_records.rl_count =
1448 cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
1449 new_rb->rf_generation = root_rb->rf_generation;
1451 ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
1457 ocfs2_journal_dirty(handle, ref_leaf_bh);
1458 ocfs2_journal_dirty(handle, new_bh);
1460 ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);
1462 mlog(0, "insert new leaf block %llu at %u\n",
1463 (unsigned long long)new_bh->b_blocknr, new_cpos);
1465 /* Insert the new leaf block with the specific offset cpos. */
1466 ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
1476 static int ocfs2_expand_refcount_tree(handle_t *handle,
1477 struct ocfs2_caching_info *ci,
1478 struct buffer_head *ref_root_bh,
1479 struct buffer_head *ref_leaf_bh,
1480 struct ocfs2_alloc_context *meta_ac)
1483 struct buffer_head *expand_bh = NULL;
1485 if (ref_root_bh == ref_leaf_bh) {
1487 * the old root bh hasn't been expanded to a b-tree,
1488 * so expand it first.
1490 ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
1491 &expand_bh, meta_ac);
1497 expand_bh = ref_leaf_bh;
1502 /* Now add a new refcount block into the tree.*/
1503 ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
1504 expand_bh, meta_ac);
1513 * Adjust the extent rec in b-tree representing ref_leaf_bh.
1515 * Only called when we have inserted a new refcount rec at index 0
1516 * which means ocfs2_extent_rec.e_cpos may need some change.
1518 static int ocfs2_adjust_refcount_rec(handle_t *handle,
1519 struct ocfs2_caching_info *ci,
1520 struct buffer_head *ref_root_bh,
1521 struct buffer_head *ref_leaf_bh,
1522 struct ocfs2_refcount_rec *rec)
1525 u32 new_cpos, old_cpos;
1526 struct ocfs2_path *path = NULL;
1527 struct ocfs2_extent_tree et;
1528 struct ocfs2_refcount_block *rb =
1529 (struct ocfs2_refcount_block *)ref_root_bh->b_data;
1530 struct ocfs2_extent_list *el;
1532 if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
1535 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1536 old_cpos = le32_to_cpu(rb->rf_cpos);
1537 new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
1538 if (old_cpos <= new_cpos)
1541 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
1543 path = ocfs2_new_path_from_et(&et);
1550 ret = ocfs2_find_path(ci, path, old_cpos);
1557 * 2 more credits, one for the leaf refcount block, one for
1558 * the extent block containing the extent rec.
1560 ret = ocfs2_extend_trans(handle, handle->h_buffer_credits + 2);
1566 ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1567 OCFS2_JOURNAL_ACCESS_WRITE);
1573 ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
1574 OCFS2_JOURNAL_ACCESS_WRITE);
1580 /* change the leaf extent block first. */
1581 el = path_leaf_el(path);
1583 for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
1584 if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
1587 BUG_ON(i == le16_to_cpu(el->l_next_free_rec));
1589 el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
1591 /* change the r_cpos in the leaf block. */
1592 rb->rf_cpos = cpu_to_le32(new_cpos);
1594 ocfs2_journal_dirty(handle, path_leaf_bh(path));
1595 ocfs2_journal_dirty(handle, ref_leaf_bh);
1598 ocfs2_free_path(path);
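/*
 * Insert a new refcount record into the leaf block, expanding the
 * refcount tree first if the leaf is already full, and fixing up the
 * extent rec's e_cpos if the new record lands at index 0.
 */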
1602 static int ocfs2_insert_refcount_rec(handle_t *handle,
1603 struct ocfs2_caching_info *ci,
1604 struct buffer_head *ref_root_bh,
1605 struct buffer_head *ref_leaf_bh,
1606 struct ocfs2_refcount_rec *rec,
1608 struct ocfs2_alloc_context *meta_ac)
1611 struct ocfs2_refcount_block *rb =
1612 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1613 struct ocfs2_refcount_list *rf_list = &rb->rf_records;
1614 struct buffer_head *new_bh = NULL;
1616 BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
1618 if (rf_list->rl_used == rf_list->rl_count) {
1619 u64 cpos = le64_to_cpu(rec->r_cpos);
1620 u32 len = le32_to_cpu(rec->r_clusters);
1622 ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
1623 ref_leaf_bh, meta_ac);
1629 ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
1630 cpos, len, NULL, &index,
1637 ref_leaf_bh = new_bh;
1638 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1639 rf_list = &rb->rf_records;
1642 ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1643 OCFS2_JOURNAL_ACCESS_WRITE);
1649 if (index < le16_to_cpu(rf_list->rl_used))
1650 memmove(&rf_list->rl_recs[index + 1],
1651 &rf_list->rl_recs[index],
1652 (le16_to_cpu(rf_list->rl_used) - index) *
1653 sizeof(struct ocfs2_refcount_rec));
1655 mlog(0, "insert refcount record start %llu, len %u, count %u "
1656 "to leaf block %llu at index %d\n",
1657 (unsigned long long)le64_to_cpu(rec->r_cpos),
1658 le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount),
1659 (unsigned long long)ref_leaf_bh->b_blocknr, index);
1661 rf_list->rl_recs[index] = *rec;
1663 le16_add_cpu(&rf_list->rl_used, 1);
1665 ocfs2_refcount_rec_merge(rb, index);
1667 ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
1674 ret = ocfs2_adjust_refcount_rec(handle, ci,
1686 * Split the refcount_rec indexed by "index" in ref_leaf_bh.
1687 * This is much simpler than our b-tree code.
1688 * split_rec is the new refcount rec we want to insert.
1689 * If split_rec->r_refcount > 0, we are changing the refcount (in case we
1690 * increase a refcount or decrease a refcount to non-zero).
1691 * If split_rec->r_refcount == 0, we are punching a hole in the current
1692 * refcount rec (in case we decrease a refcount to zero).
1694 static int ocfs2_split_refcount_rec(handle_t *handle,
1695 struct ocfs2_caching_info *ci,
1696 struct buffer_head *ref_root_bh,
1697 struct buffer_head *ref_leaf_bh,
1698 struct ocfs2_refcount_rec *split_rec,
1700 struct ocfs2_alloc_context *meta_ac,
1701 struct ocfs2_cached_dealloc_ctxt *dealloc)
1705 struct ocfs2_refcount_block *rb =
1706 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1707 struct ocfs2_refcount_list *rf_list = &rb->rf_records;
1708 struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
1709 struct ocfs2_refcount_rec *tail_rec = NULL;
1710 struct buffer_head *new_bh = NULL;
1712 BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
1714 mlog(0, "original r_pos %llu, cluster %u, split %llu, cluster %u\n",
1715 le64_to_cpu(orig_rec->r_cpos), le32_to_cpu(orig_rec->r_clusters),
1716 le64_to_cpu(split_rec->r_cpos),
1717 le32_to_cpu(split_rec->r_clusters));
1720 * If we just need to split the header or tail clusters,
1721 * no more recs are needed; just the split is OK.
1722 * Otherwise we need at least one new rec.
1724 if (!split_rec->r_refcount &&
1725 (split_rec->r_cpos == orig_rec->r_cpos ||
1726 le64_to_cpu(split_rec->r_cpos) +
1727 le32_to_cpu(split_rec->r_clusters) ==
1728 le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
1734 * We need one more rec if we split in the middle and the new rec has
1735 * some refcount in it.
1737 if (split_rec->r_refcount &&
1738 (split_rec->r_cpos != orig_rec->r_cpos &&
1739 le64_to_cpu(split_rec->r_cpos) +
1740 le32_to_cpu(split_rec->r_clusters) !=
1741 le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
1744 /* If the leaf block doesn't have enough records, expand it. */
1745 if (le16_to_cpu(rf_list->rl_used) + recs_need > rf_list->rl_count) {
1746 struct ocfs2_refcount_rec tmp_rec;
1747 u64 cpos = le64_to_cpu(orig_rec->r_cpos);
1748 len = le32_to_cpu(orig_rec->r_clusters);
1749 ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
1750 ref_leaf_bh, meta_ac);
1757 * We have to re-get it since cpos may now have been moved to
1758 * another leaf block.
1760 ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
1761 cpos, len, &tmp_rec, &index,
1768 ref_leaf_bh = new_bh;
1769 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1770 rf_list = &rb->rf_records;
1771 orig_rec = &rf_list->rl_recs[index];
1774 ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1775 OCFS2_JOURNAL_ACCESS_WRITE);
1782 * We have calculated how many new records we need and stored
1783 * that in recs_need, so make enough space first by moving the records
1784 * after "index" to the end.
1786 if (index != le16_to_cpu(rf_list->rl_used) - 1)
1787 memmove(&rf_list->rl_recs[index + 1 + recs_need],
1788 &rf_list->rl_recs[index + 1],
1789 (le16_to_cpu(rf_list->rl_used) - index - 1) *
1790 sizeof(struct ocfs2_refcount_rec));
1792 len = (le64_to_cpu(orig_rec->r_cpos) +
1793 le32_to_cpu(orig_rec->r_clusters)) -
1794 (le64_to_cpu(split_rec->r_cpos) +
1795 le32_to_cpu(split_rec->r_clusters));
1798 * If we have "len", then we will split in the tail and move it
1799 * to the end of the space we have just spared.
1802 tail_rec = &rf_list->rl_recs[index + recs_need];
1804 memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
1805 le64_add_cpu(&tail_rec->r_cpos,
1806 le32_to_cpu(tail_rec->r_clusters) - len);
1807 tail_rec->r_clusters = cpu_to_le32(len);
1811 * If the split pos isn't the same as the original one, we need to
1812 * split in the head.
1814 * Note: We have the chance that split_rec.r_refcount = 0,
1815 * recs_need = 0 and len > 0, which means we just cut the head from
1816 * the orig_rec and in that case we have done some modification in
1817 * orig_rec above, so the check for r_cpos is faked.
1819 if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
1820 len = le64_to_cpu(split_rec->r_cpos) -
1821 le64_to_cpu(orig_rec->r_cpos);
1822 orig_rec->r_clusters = cpu_to_le32(len);
1826 le16_add_cpu(&rf_list->rl_used, recs_need);
1828 if (split_rec->r_refcount) {
1829 rf_list->rl_recs[index] = *split_rec;
1830 mlog(0, "insert refcount record start %llu, len %u, count %u "
1831 "to leaf block %llu at index %d\n",
1832 (unsigned long long)le64_to_cpu(split_rec->r_cpos),
1833 le32_to_cpu(split_rec->r_clusters),
1834 le32_to_cpu(split_rec->r_refcount),
1835 (unsigned long long)ref_leaf_bh->b_blocknr, index);
1837 ocfs2_refcount_rec_merge(rb, index);
1840 ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
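/*
 * Increase the refcount of the clusters starting at cpos for len
 * clusters, walking the refcount records that cover the range and
 * either bumping an existing record, inserting a new one for a hole,
 * or splitting a record that only partially overlaps.
 */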
1849 static int __ocfs2_increase_refcount(handle_t *handle,
1850 struct ocfs2_caching_info *ci,
1851 struct buffer_head *ref_root_bh,
1853 struct ocfs2_alloc_context *meta_ac,
1854 struct ocfs2_cached_dealloc_ctxt *dealloc)
1857 struct buffer_head *ref_leaf_bh = NULL;
1858 struct ocfs2_refcount_rec rec;
1859 unsigned int set_len = 0;
1861 mlog(0, "Tree owner %llu, add refcount start %llu, len %u\n",
1862 (unsigned long long)ocfs2_metadata_cache_owner(ci),
1863 (unsigned long long)cpos, len);
1866 ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
1867 cpos, len, &rec, &index,
1874 set_len = le32_to_cpu(rec.r_clusters);
1877 * Here we may encounter 3 situations:
1879 * 1. If we find an already existing record, and the length
1880 * is the same, cool, we just need to increase the r_refcount
1882 * 2. If we find a hole, just insert it with r_refcount = 1.
1883 * 3. If we are in the middle of one extent record, split
1886 if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
1888 mlog(0, "increase refcount rec, start %llu, len %u, "
1889 "count %u\n", (unsigned long long)cpos, set_len,
1890 le32_to_cpu(rec.r_refcount));
1891 ret = ocfs2_change_refcount_rec(handle, ci,
1892 ref_leaf_bh, index, 1);
1897 } else if (!rec.r_refcount) {
1898 rec.r_refcount = cpu_to_le32(1);
1900 mlog(0, "insert refcount rec, start %llu, len %u\n",
1901 (unsigned long long)le64_to_cpu(rec.r_cpos),
1903 ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
1905 &rec, index, meta_ac);
1911 set_len = min((u64)(cpos + len),
1912 le64_to_cpu(rec.r_cpos) + set_len) - cpos;
1913 rec.r_cpos = cpu_to_le64(cpos);
1914 rec.r_clusters = cpu_to_le32(set_len);
1915 le32_add_cpu(&rec.r_refcount, 1);
1917 mlog(0, "split refcount rec, start %llu, "
1918 "len %u, count %u\n",
1919 (unsigned long long)le64_to_cpu(rec.r_cpos),
1920 set_len, le32_to_cpu(rec.r_refcount));
1921 ret = ocfs2_split_refcount_rec(handle, ci,
1922 ref_root_bh, ref_leaf_bh,
1933 brelse(ref_leaf_bh);
1938 brelse(ref_leaf_bh);
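/*
 * Remove an empty leaf refcount block from the refcount b-tree and
 * queue the block itself for deallocation.
 */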
1942 static int ocfs2_remove_refcount_extent(handle_t *handle,
1943 struct ocfs2_caching_info *ci,
1944 struct buffer_head *ref_root_bh,
1945 struct buffer_head *ref_leaf_bh,
1946 struct ocfs2_alloc_context *meta_ac,
1947 struct ocfs2_cached_dealloc_ctxt *dealloc)
1950 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
1951 struct ocfs2_refcount_block *rb =
1952 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1953 struct ocfs2_extent_tree et;
1955 BUG_ON(rb->rf_records.rl_used);
1957 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
1958 ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
1959 1, meta_ac, dealloc);
1965 ocfs2_remove_from_cache(ci, ref_leaf_bh);
1968 * Add the freed block to the dealloc context so that it will be freed
1969 * when we run dealloc.
1971 ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
1972 le16_to_cpu(rb->rf_suballoc_slot),
1973 le64_to_cpu(rb->rf_blkno),
1974 le16_to_cpu(rb->rf_suballoc_bit));
1980 ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
1981 OCFS2_JOURNAL_ACCESS_WRITE);
1987 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
1989 le32_add_cpu(&rb->rf_clusters, -1);
1992 * Check whether we need to restore the root refcount block if
1993 * there is no leaf extent block at all.
1995 if (!rb->rf_list.l_next_free_rec) {
1996 BUG_ON(rb->rf_clusters);
1998 mlog(0, "reset refcount tree root %llu to be a record block.\n",
1999 (unsigned long long)ref_root_bh->b_blocknr);
2004 memset(&rb->rf_records, 0, sb->s_blocksize -
2005 offsetof(struct ocfs2_refcount_block, rf_records));
2006 rb->rf_records.rl_count =
2007 cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
2010 ocfs2_journal_dirty(handle, ref_root_bh);
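/*
 * Decrease the refcount of the range [cpos, cpos + len) covered by the
 * record at "index", splitting the record if the range only partially
 * covers it, and removing the leaf block once it holds no records.
 */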
2016 static int ocfs2_decrease_refcount_rec(handle_t *handle,
2017 struct ocfs2_caching_info *ci,
2018 struct buffer_head *ref_root_bh,
2019 struct buffer_head *ref_leaf_bh,
2020 int index, u64 cpos, unsigned int len,
2021 struct ocfs2_alloc_context *meta_ac,
2022 struct ocfs2_cached_dealloc_ctxt *dealloc)
2025 struct ocfs2_refcount_block *rb =
2026 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
2027 struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];
2029 BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
2031 le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));
2033 if (cpos == le64_to_cpu(rec->r_cpos) &&
2034 len == le32_to_cpu(rec->r_clusters))
2035 ret = ocfs2_change_refcount_rec(handle, ci,
2036 ref_leaf_bh, index, -1);
2038 struct ocfs2_refcount_rec split = *rec;
2039 split.r_cpos = cpu_to_le64(cpos);
2040 split.r_clusters = cpu_to_le32(len);
2042 le32_add_cpu(&split.r_refcount, -1);
2044 mlog(0, "split refcount rec, start %llu, "
2045 "len %u, count %u, original start %llu, len %u\n",
2046 (unsigned long long)le64_to_cpu(split.r_cpos),
2047 len, le32_to_cpu(split.r_refcount),
2048 (unsigned long long)le64_to_cpu(rec->r_cpos),
2049 le32_to_cpu(rec->r_clusters));
2050 ret = ocfs2_split_refcount_rec(handle, ci,
2051 ref_root_bh, ref_leaf_bh,
2061 /* Remove the leaf refcount block if it contains no refcount records. */
2062 if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
2063 ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh,
2064 ref_leaf_bh, meta_ac,
2074 static int __ocfs2_decrease_refcount(handle_t *handle,
2075 struct ocfs2_caching_info *ci,
2076 struct buffer_head *ref_root_bh,
2078 struct ocfs2_alloc_context *meta_ac,
2079 struct ocfs2_cached_dealloc_ctxt *dealloc)
2081 int ret = 0, index = 0;
2082 struct ocfs2_refcount_rec rec;
2083 unsigned int r_count = 0, r_len;
2084 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
2085 struct buffer_head *ref_leaf_bh = NULL;
2087 mlog(0, "Tree owner %llu, decrease refcount start %llu, len %u\n",
2088 (unsigned long long)ocfs2_metadata_cache_owner(ci),
2089 (unsigned long long)cpos, len);
2092 ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
2093 cpos, len, &rec, &index,
2100 r_count = le32_to_cpu(rec.r_refcount);
2101 BUG_ON(r_count == 0);
2103 r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
2104 le32_to_cpu(rec.r_clusters)) - cpos;
2106 ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh,
2115 if (le32_to_cpu(rec.r_refcount) == 1) {
2116 ret = ocfs2_cache_cluster_dealloc(dealloc,
2117 ocfs2_clusters_to_blocks(sb, cpos),
2127 brelse(ref_leaf_bh);
2132 brelse(ref_leaf_bh);
2136 /* Caller must hold refcount tree lock. */
2137 int ocfs2_decrease_refcount(struct inode *inode,
2138 handle_t *handle, u32 cpos, u32 len,
2139 struct ocfs2_alloc_context *meta_ac,
2140 struct ocfs2_cached_dealloc_ctxt *dealloc)
2144 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2145 struct buffer_head *ref_root_bh = NULL;
2146 struct ocfs2_refcount_tree *tree;
2148 BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
2150 ret = ocfs2_get_refcount_block(inode, &ref_blkno);
2156 ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree);
2162 ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
2169 ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh,
2170 cpos, len, meta_ac, dealloc);
2174 brelse(ref_root_bh);
2179 * Mark the already-existing extent at cpos as refcounted for len clusters.
2180 * This adds the refcount extent flag.
2182 * If the existing extent is larger than the request, initiate a
2183 * split. An attempt will be made at merging with adjacent extents.
2185 * The caller is responsible for passing down meta_ac if we'll need it.
2187 static int ocfs2_mark_extent_refcounted(struct inode *inode,
2188 struct ocfs2_extent_tree *et,
2189 handle_t *handle, u32 cpos,
2191 struct ocfs2_alloc_context *meta_ac,
2192 struct ocfs2_cached_dealloc_ctxt *dealloc)
2196 mlog(0, "Inode %lu refcount tree cpos %u, len %u, phys cluster %u\n",
2197 inode->i_ino, cpos, len, phys);
2199 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2200 ocfs2_error(inode->i_sb, "Inode %lu wants to use refcount "
2201 "tree, but the feature bit is not set in the "
2202 "super block.", inode->i_ino);
2207 ret = ocfs2_change_extent_flag(handle, et, cpos,
2208 len, phys, meta_ac, dealloc,
2209 OCFS2_EXT_REFCOUNTED, 0);
2218 * Given some contiguous physical clusters, calculate what we need
2219 * for modifying their refcount.
2221 static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
2222 struct ocfs2_caching_info *ci,
2223 struct buffer_head *ref_root_bh,
2229 int ret = 0, index, ref_blocks = 0, recs_add = 0;
2230 u64 cpos = start_cpos;
2231 struct ocfs2_refcount_block *rb;
2232 struct ocfs2_refcount_rec rec;
2233 struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
2236 mlog(0, "start_cpos %llu, clusters %u\n",
2237 (unsigned long long)start_cpos, clusters);
2239 ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
2240 cpos, clusters, &rec,
2241 &index, &ref_leaf_bh);
2247 if (ref_leaf_bh != prev_bh) {
2249 * Now we encounter a new leaf block, so calculate
2250 * whether we need to extend the old leaf.
2253 rb = (struct ocfs2_refcount_block *)
2256 if (le16_to_cpu(rb->rf_records.rl_used) +
2258 le16_to_cpu(rb->rf_records.rl_count))
2265 prev_bh = ref_leaf_bh;
2269 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
2271 mlog(0, "recs_add %d,cpos %llu, clusters %u, rec->r_cpos %llu,"
2272 "rec->r_clusters %u, rec->r_refcount %u, index %d\n",
2273 recs_add, (unsigned long long)cpos, clusters,
2274 (unsigned long long)le64_to_cpu(rec.r_cpos),
2275 le32_to_cpu(rec.r_clusters),
2276 le32_to_cpu(rec.r_refcount), index);
2278 len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
2279 le32_to_cpu(rec.r_clusters)) - cpos;
2281 * If the refcount rec already exists, cool. We just need
2282 * to check whether there is a split. Otherwise we just need
2283 * to increase the refcount.
2284 * If we will insert one, increase recs_add.
2286 * We record all the records which will be inserted into the
2287 * same refcount block, so that we can tell exactly whether
2288 * we need a new refcount block or not.
2290 if (rec.r_refcount) {
2291 /* Check whether we need a split at the beginning. */
2292 if (cpos == start_cpos &&
2293 cpos != le64_to_cpu(rec.r_cpos))
2296 /* Check whether we need a split in the end. */
2297 if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
2298 le32_to_cpu(rec.r_clusters))
2303 brelse(ref_leaf_bh);
2310 rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
2312 if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
2313 le16_to_cpu(rb->rf_records.rl_count))
2322 mlog(0, "we need ref_blocks %d\n", ref_blocks);
2323 *meta_add += ref_blocks;
2324 *credits += ref_blocks;
2327 * So we may need ref_blocks to insert into the tree.
2328 * That also means we need to change the b-tree and add that number
2329 * of records since we never merge them.
2330 * We need one more block for expansion since the newly created leaf
2331 * block is also full and needs a split.
2333 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
2334 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
2335 struct ocfs2_extent_tree et;
2337 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
2338 *meta_add += ocfs2_extend_meta_needed(et.et_root_el);
2339 *credits += ocfs2_calc_extend_credits(sb,
2343 *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
2348 brelse(ref_leaf_bh);
2354 * For a refcount tree, we will decrease the refcount of some contiguous
2355 * clusters, so just go through it to see how many blocks
2356 * we are going to touch and whether we need to create new blocks.
2358 * Normally the refcount blocks storing these refcounts should be
2359 * contiguous as well, so we can get the number easily.
2360 * As for meta_ac, we will at most split 2 refcount records and add
2361 * 2 more refcount blocks, so just check it in a rough way.
2363 * Caller must hold the refcount tree lock.
2365 int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
2366 struct buffer_head *di_bh,
2370 struct ocfs2_alloc_context **meta_ac)
2372 int ret, ref_blocks = 0;
2373 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2374 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2375 struct buffer_head *ref_root_bh = NULL;
2376 struct ocfs2_refcount_tree *tree;
2377 u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);
2379 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2380 ocfs2_error(inode->i_sb, "Inode %lu wants to use refcount "
2381 "tree, but the feature bit is not set in the "
2382 "super block.", inode->i_ino);
2387 BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
2389 ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
2390 le64_to_cpu(di->i_refcount_loc), &tree);
2396 ret = ocfs2_read_refcount_block(&tree->rf_ci,
2397 le64_to_cpu(di->i_refcount_loc),
2404 ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
2407 start_cpos, clusters,
2408 &ref_blocks, credits);
2414 mlog(0, "reserve new metadata %d, credits = %d\n",
2415 ref_blocks, *credits);
2418 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
2419 ref_blocks, meta_ac);
2425 brelse(ref_root_bh);
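/*
 * CoW is done in chunks of at most MAX_CONTIG_BYTES (1MB) worth of
 * clusters; extents are broken up on these boundaries so that the
 * resulting extent tree gives good I/O.
 */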
2429 #define MAX_CONTIG_BYTES 1048576
2431 static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb)
2433 return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES);
2436 static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb)
2438 return ~(ocfs2_cow_contig_clusters(sb) - 1);
2442 * Given an extent that starts at 'start' and an I/O that starts at 'cpos',
2443 * find an offset (start + (n * contig_clusters)) that is closest to cpos
2444 * while still being less than or equal to it.
2446 * The goal is to break the extent at a multiple of contig_clusters.
2448 static inline unsigned int ocfs2_cow_align_start(struct super_block *sb,
2452 BUG_ON(start > cpos);
2454 return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
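/*
 * Illustrative numbers, assuming contig_clusters == 256 as above: for
 * start == 100 and cpos == 700, (700 - 100) & ~255 == 512, so we
 * return 100 + 512 == 612 -- the largest offset of the form
 * start + n * contig_clusters that does not exceed cpos.
 */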
2458 * Given a cluster count of len, pad it out so that it is a multiple
2459 * of contig_clusters.
2461 static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
2464 unsigned int padded =
2465 (len + (ocfs2_cow_contig_clusters(sb) - 1)) &
2466 ocfs2_cow_contig_mask(sb);
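/*
 * E.g., still assuming contig_clusters == 256: len == 300 pads out to
 * (300 + 255) & ~255 == 512, while len == 512 is already aligned and
 * stays 512.
 */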
2476 * Calculate the start and number of virtual clusters we need to CoW.
2478 * cpos is the virtual start cluster position at which we want to do CoW
2479 * in a file and write_len is the cluster length.
2481 * Normally we will start CoW from the beginning of the extent record containing cpos.
2482 * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we
2483 * get good I/O from the resulting extent tree.
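*
* A sketch of the outcome under the same assumptions (4KB clusters,
* contig_clusters == 256): a 1-cluster write at cpos 10 that lands in a
* refcounted extent covering clusters 0-999 comes back with
* cow_start == 0 and cow_len == 256; we break off the first 1MB chunk
* of the extent rather than CoWing a single cluster.
*/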
2485 static int ocfs2_refcount_cal_cow_clusters(struct inode *inode,
2486 struct buffer_head *di_bh,
2493 struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
2494 struct ocfs2_extent_list *el = &di->id2.i_list;
2495 int tree_height = le16_to_cpu(el->l_tree_depth), i;
2496 struct buffer_head *eb_bh = NULL;
2497 struct ocfs2_extent_block *eb = NULL;
2498 struct ocfs2_extent_rec *rec;
2499 unsigned int want_clusters, rec_end = 0;
2500 int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb);
2503 if (tree_height > 0) {
2504 ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
2510 eb = (struct ocfs2_extent_block *) eb_bh->b_data;
2513 if (el->l_tree_depth) {
2514 ocfs2_error(inode->i_sb,
2515 "Inode %lu has non zero tree depth in "
2516 "leaf block %llu\n", inode->i_ino,
2517 (unsigned long long)eb_bh->b_blocknr);
2524 for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
2525 rec = &el->l_recs[i];
2527 if (ocfs2_is_empty_extent(rec)) {
2528 mlog_bug_on_msg(i != 0, "Inode %lu has empty record in "
2529 "index %d\n", inode->i_ino, i);
2533 if (le32_to_cpu(rec->e_cpos) +
2534 le16_to_cpu(rec->e_leaf_clusters) <= cpos)
2537 if (*cow_len == 0) {
2539 * We should find a refcounted record in the first pass.
2542 BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED));
2543 *cow_start = le32_to_cpu(rec->e_cpos);
2547 * If we encounter a hole or a non-refcounted record, stop the search.
2550 if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) ||
2551 (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)))
2554 leaf_clusters = le16_to_cpu(rec->e_leaf_clusters);
2555 rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters;
2558 * How many clusters do we actually need from
2559 * this extent? First we see how many we actually
2560 * need to complete the write. If that's smaller
2561 * than contig_clusters, we try for contig_clusters.
2564 want_clusters = write_len;
2566 want_clusters = (cpos + write_len) -
2567 (*cow_start + *cow_len);
2568 if (want_clusters < contig_clusters)
2569 want_clusters = contig_clusters;
2572 * If the write does not cover the whole extent, we
2573 * need to calculate how we're going to split the extent.
2574 * We try to do it on contig_clusters boundaries.
2576 * Any extent smaller than contig_clusters will be
2577 * CoWed in its entirety.
2579 if (leaf_clusters <= contig_clusters)
2580 *cow_len += leaf_clusters;
2581 else if (*cow_len || (*cow_start == cpos)) {
2583 * This extent needs to be CoW'd from its
2584 * beginning, so all we have to do is compute
2585 * how many clusters to grab. We align
2586 * want_clusters to the edge of contig_clusters
2587 * to get better I/O.
2589 want_clusters = ocfs2_cow_align_length(inode->i_sb,
2592 if (leaf_clusters < want_clusters)
2593 *cow_len += leaf_clusters;
2595 *cow_len += want_clusters;
2596 } else if ((*cow_start + contig_clusters) >=
2597 (cpos + write_len)) {
2599 * Breaking off contig_clusters at the front
2600 * of the extent will cover our write. That's
2603 *cow_len = contig_clusters;
2604 } else if ((rec_end - cpos) <= contig_clusters) {
2606 * Breaking off contig_clusters at the tail of
2607 * this extent will cover cpos.
2609 *cow_start = rec_end - contig_clusters;
2610 *cow_len = contig_clusters;
2611 } else if ((rec_end - cpos) <= want_clusters) {
2613 * While we can't fit the entire write in this
2614 * extent, we know that the write goes from cpos
2615 * to the end of the extent. Break that off.
2616 * We try to break it at some multiple of
2617 * contig_clusters from the front of the extent.
2618 * Failing that (i.e., cpos is within
2619 * contig_clusters of the front), we'll CoW the entire extent.
2622 *cow_start = ocfs2_cow_align_start(inode->i_sb,
2624 *cow_len = rec_end - *cow_start;
2627 * Ok, the entire write lives in the middle of
2628 * this extent. Let's try to slice the extent up
2629 * nicely. Optimally, our CoW region starts at
2630 * m*contig_clusters from the beginning of the
2631 * extent and goes for n*contig_clusters,
2632 * covering the entire write.
2634 *cow_start = ocfs2_cow_align_start(inode->i_sb,
2637 want_clusters = (cpos + write_len) - *cow_start;
2638 want_clusters = ocfs2_cow_align_length(inode->i_sb,
2640 if (*cow_start + want_clusters <= rec_end)
2641 *cow_len = want_clusters;
2643 *cow_len = rec_end - *cow_start;
2646 /* Have we covered our entire write yet? */
2647 if ((*cow_start + *cow_len) >= (cpos + write_len))
2651 * If we reach the end of the extent block and don't get enough
2652 * clusters, continue with the next extent block if possible.
2654 if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
2655 eb && eb->h_next_leaf_blk) {
2659 ret = ocfs2_read_extent_block(INODE_CACHE(inode),
2660 le64_to_cpu(eb->h_next_leaf_blk),
2667 eb = (struct ocfs2_extent_block *) eb_bh->b_data;
2679 * Prepare meta_ac and data_ac and calculate credits when we want to add
2680 * num_clusters of new clusters to the data tree "et" and change the refcount
2681 * for the old clusters (starting from p_cluster) in the refcount tree.
2684 * 1. Since we may split the old tree, we will at most need num_clusters + 2
2685 * more new leaf records.
2686 * 2. In some cases we may not need to reserve new clusters (e.g., reflink), so
2687 * just pass data_ac = NULL.
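*
* For instance (illustrative): reflinking or rewriting num_clusters == 8
* clusters budgets for up to 8 + 2 == 10 new leaf records, the extra 2
* covering a split at each end of the old extent record.
*/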
2689 static int ocfs2_lock_refcount_allocators(struct super_block *sb,
2690 u32 p_cluster, u32 num_clusters,
2691 struct ocfs2_extent_tree *et,
2692 struct ocfs2_caching_info *ref_ci,
2693 struct buffer_head *ref_root_bh,
2694 struct ocfs2_alloc_context **meta_ac,
2695 struct ocfs2_alloc_context **data_ac,
2698 int ret = 0, meta_add = 0;
2699 int num_free_extents = ocfs2_num_free_extents(OCFS2_SB(sb), et);
2701 if (num_free_extents < 0) {
2702 ret = num_free_extents;
2707 if (num_free_extents < num_clusters + 2)
2709 *meta_add += ocfs2_extend_meta_needed(et->et_root_el);
2711 *credits += ocfs2_calc_extend_credits(sb, et->et_root_el,
2714 ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
2715 p_cluster, num_clusters,
2716 &meta_add, credits);
2722 mlog(0, "reserve new metadata %d, clusters %u, credits = %d\n",
2723 meta_add, num_clusters, *credits);
2724 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
2732 ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
2741 ocfs2_free_alloc_context(*meta_ac);
2749 static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
2751 BUG_ON(buffer_dirty(bh));
2753 clear_buffer_mapped(bh);
2758 static int ocfs2_duplicate_clusters(handle_t *handle,
2759 struct ocfs2_cow_context *context,
2760 u32 cpos, u32 old_cluster,
2761 u32 new_cluster, u32 new_len)
2763 int ret = 0, partial;
2764 struct ocfs2_caching_info *ci = context->di_et.et_ci;
2765 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
2766 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
2769 unsigned int from, to;
2770 loff_t offset, end, map_end;
2771 struct address_space *mapping = context->inode->i_mapping;
2773 mlog(0, "old_cluster %u, new %u, len %u at offset %u\n", old_cluster,
2774 new_cluster, new_len, cpos);
2776 offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
2777 end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
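/*
 * Worked example for the page walk below (hypothetical values, assuming
 * 4KB clusters and 4KB pages): cpos == 3 and new_len == 2 give
 * offset == 12288 and end == 20480, so we visit page indices 3 and 4
 * and copy the full page range [0, 4096) each time.
 */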
2779 while (offset < end) {
2780 page_index = offset >> PAGE_CACHE_SHIFT;
2781 map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
2785 /* from, to is the offset within the page. */
2786 from = offset & (PAGE_CACHE_SIZE - 1);
2787 to = PAGE_CACHE_SIZE;
2788 if (map_end & (PAGE_CACHE_SIZE - 1))
2789 to = map_end & (PAGE_CACHE_SIZE - 1);
2791 page = grab_cache_page(mapping, page_index);
2793 /* This page can't be dirtied before we CoW it out. */
2794 BUG_ON(PageDirty(page));
2796 if (!PageUptodate(page)) {
2797 ret = block_read_full_page(page, ocfs2_get_block);
2805 if (page_has_buffers(page)) {
2806 ret = walk_page_buffers(handle, page_buffers(page),
2808 ocfs2_clear_cow_buffer);
2815 ocfs2_map_and_dirty_page(context->inode,
2817 page, 0, &new_block);
2818 mark_page_accessed(page);
2821 page_cache_release(page);
2831 static int ocfs2_clear_ext_refcount(handle_t *handle,
2832 struct ocfs2_extent_tree *et,
2833 u32 cpos, u32 p_cluster, u32 len,
2834 unsigned int ext_flags,
2835 struct ocfs2_alloc_context *meta_ac,
2836 struct ocfs2_cached_dealloc_ctxt *dealloc)
2839 struct ocfs2_extent_rec replace_rec;
2840 struct ocfs2_path *path = NULL;
2841 struct ocfs2_extent_list *el;
2842 struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
2843 u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
2845 mlog(0, "inode %llu cpos %u, len %u, p_cluster %u, ext_flags %u\n",
2846 (unsigned long long)ino, cpos, len, p_cluster, ext_flags);
2848 memset(&replace_rec, 0, sizeof(replace_rec));
2849 replace_rec.e_cpos = cpu_to_le32(cpos);
2850 replace_rec.e_leaf_clusters = cpu_to_le16(len);
2851 replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb,
2853 replace_rec.e_flags = ext_flags;
2854 replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED;
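/*
 * replace_rec now describes len clusters at cpos, mapped to p_cluster
 * and with the REFCOUNTED flag cleared; ocfs2_split_extent() below
 * swaps it into the leaf in place of the old refcounted range.
 */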
2856 path = ocfs2_new_path_from_et(et);
2863 ret = ocfs2_find_path(et->et_ci, path, cpos);
2869 el = path_leaf_el(path);
2871 index = ocfs2_search_extent_list(el, cpos);
2872 if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
2874 "Inode %llu has an extent at cpos %u which can no "
2875 "longer be found.\n",
2876 (unsigned long long)ino, cpos);
2881 ret = ocfs2_split_extent(handle, et, path, index,
2882 &replace_rec, meta_ac, dealloc);
2887 ocfs2_free_path(path);
2891 static int ocfs2_replace_clusters(handle_t *handle,
2892 struct ocfs2_cow_context *context,
2895 unsigned int ext_flags)
2898 struct ocfs2_caching_info *ci = context->di_et.et_ci;
2899 u64 ino = ocfs2_metadata_cache_owner(ci);
2901 mlog(0, "inode %llu, cpos %u, old %u, new %u, len %u, ext_flags %u\n",
2902 (unsigned long long)ino, cpos, old, new, len, ext_flags);
2904 /* If the old clusters are unwritten, there is no need to duplicate. */
2905 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
2906 ret = ocfs2_duplicate_clusters(handle, context, cpos,
2914 ret = ocfs2_clear_ext_refcount(handle, &context->di_et,
2915 cpos, new, len, ext_flags,
2916 context->meta_ac, &context->dealloc);
2923 static int ocfs2_cow_sync_writeback(struct super_block *sb,
2924 struct ocfs2_cow_context *context,
2925 u32 cpos, u32 num_clusters)
2928 loff_t offset, end, map_end;
2932 if (ocfs2_should_order_data(context->inode))
2935 offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
2936 end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);
2938 ret = filemap_fdatawrite_range(context->inode->i_mapping,
2945 while (offset < end) {
2946 page_index = offset >> PAGE_CACHE_SHIFT;
2947 map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
2951 page = grab_cache_page(context->inode->i_mapping, page_index);
2954 wait_on_page_writeback(page);
2955 if (PageError(page)) {
2959 mark_page_accessed(page);
2962 page_cache_release(page);
2972 static int ocfs2_make_clusters_writable(struct super_block *sb,
2973 struct ocfs2_cow_context *context,
2974 u32 cpos, u32 p_cluster,
2975 u32 num_clusters, unsigned int e_flags)
2977 int ret, credits = 0;
2978 u32 new_bit, new_len;
2979 struct ocfs2_super *osb = OCFS2_SB(sb);
2982 ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
2985 context->ref_root_bh,
2987 &context->data_ac, &credits);
2993 handle = ocfs2_start_trans(osb, credits);
2994 if (IS_ERR(handle)) {
2995 ret = PTR_ERR(handle);
3000 while (num_clusters) {
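/*
 * __ocfs2_claim_clusters() may return a shorter run than we asked
 * for (new_len < num_clusters), e.g. when free space is fragmented,
 * so each pass copies one chunk and advances by new_len.
 */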
3001 ret = __ocfs2_claim_clusters(osb, handle, context->data_ac,
3003 &new_bit, &new_len);
3009 ret = ocfs2_replace_clusters(handle, context,
3010 cpos, p_cluster, new_bit,
3018 p_cluster += new_len;
3019 num_clusters -= new_len;
3022 ret = __ocfs2_decrease_refcount(handle, context->ref_ci,
3023 context->ref_root_bh,
3024 p_cluster, num_clusters,
3033 * Here we should write the new page out first if we are
3034 * in write-back mode.
3036 ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
3041 ocfs2_commit_trans(osb, handle);
3044 if (context->data_ac) {
3045 ocfs2_free_alloc_context(context->data_ac);
3046 context->data_ac = NULL;
3048 if (context->meta_ac) {
3049 ocfs2_free_alloc_context(context->meta_ac);
3050 context->meta_ac = NULL;
3056 static int ocfs2_replace_cow(struct inode *inode,
3057 struct buffer_head *di_bh,
3058 struct buffer_head *ref_root_bh,
3059 struct ocfs2_caching_info *ref_ci,
3060 u32 cow_start, u32 cow_len)
3063 u32 p_cluster, num_clusters, start = cow_start;
3064 unsigned int ext_flags;
3065 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3066 struct ocfs2_cow_context *context;
3068 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
3069 ocfs2_error(inode->i_sb, "Inode %lu wants to use the refcount "
3070 "tree, but the feature bit is not set in the "
3071 "super block.", inode->i_ino);
3075 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
3082 context->inode = inode;
3083 context->cow_start = cow_start;
3084 context->cow_len = cow_len;
3085 context->ref_ci = ref_ci;
3086 context->ref_root_bh = ref_root_bh;
3088 ocfs2_init_dealloc_ctxt(&context->dealloc);
3089 ocfs2_init_dinode_extent_tree(&context->di_et,
3090 INODE_CACHE(inode), di_bh);
3093 ret = ocfs2_get_clusters(inode, cow_start, &p_cluster,
3094 &num_clusters, &ext_flags);
3100 BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED));
3102 if (cow_len < num_clusters)
3103 num_clusters = cow_len;
3105 ret = ocfs2_make_clusters_writable(inode->i_sb, context,
3106 cow_start, p_cluster,
3107 num_clusters, ext_flags);
3113 cow_len -= num_clusters;
3114 cow_start += num_clusters;
3119 * Truncate the extent map here: no matter whether we hit an error
3120 * during the action, we shouldn't trust the cached extent map any more.
3123 ocfs2_extent_map_trunc(inode, start);
3125 if (ocfs2_dealloc_has_cluster(&context->dealloc)) {
3126 ocfs2_schedule_truncate_log_flush(osb, 1);
3127 ocfs2_run_deallocs(osb, &context->dealloc);
3135 * Starting at cpos, try to CoW write_len clusters.
3136 * This will stop when it runs into a hole or an unrefcounted extent.
3138 static int ocfs2_refcount_cow_hunk(struct inode *inode,
3139 struct buffer_head *di_bh,
3140 u32 cpos, u32 write_len)
3143 u32 cow_start = 0, cow_len = 0;
3144 struct ocfs2_inode_info *oi = OCFS2_I(inode);
3145 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3146 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3147 struct buffer_head *ref_root_bh = NULL;
3148 struct ocfs2_refcount_tree *ref_tree;
3150 BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
3152 ret = ocfs2_refcount_cal_cow_clusters(inode, di_bh, cpos, write_len,
3153 &cow_start, &cow_len);
3158 mlog(0, "CoW inode %lu, cpos %u, write_len %u, cow_start %u, "
3159 "cow_len %u\n", inode->i_ino,
3160 cpos, write_len, cow_start, cow_len);
3162 BUG_ON(cow_len == 0);
3164 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
3165 1, &ref_tree, &ref_root_bh);
3171 ret = ocfs2_replace_cow(inode, di_bh, ref_root_bh, &ref_tree->rf_ci,
3172 cow_start, cow_len);
3176 ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
3177 brelse(ref_root_bh);
3183 * CoW any and all clusters between cpos and cpos+write_len.
3184 * If this returns successfully, all clusters between cpos and
3185 * cpos+write_len are safe to modify.
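*
* E.g., with a hypothetical layout where clusters [100, 104) are
* refcounted and [104, 108) are a hole, ocfs2_refcount_cow(inode, di_bh,
* 100, 8) CoWs the first hunk via ocfs2_refcount_cow_hunk() and then
* skips over the hole on the next pass of its loop.
*/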
3187 int ocfs2_refcount_cow(struct inode *inode,
3188 struct buffer_head *di_bh,
3189 u32 cpos, u32 write_len)
3192 u32 p_cluster, num_clusters;
3193 unsigned int ext_flags;
3196 ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
3197 &num_clusters, &ext_flags);
3203 if (write_len < num_clusters)
3204 num_clusters = write_len;
3206 if (ext_flags & OCFS2_EXT_REFCOUNTED) {
3207 ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
3215 write_len -= num_clusters;
3216 cpos += num_clusters;