git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - fs/btrfs/ordered-data.c
Merge branch 'stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf/linux...
[karo-tx-linux.git] / fs / btrfs / ordered-data.c
index f107312970405da1e3218118a8d0555894955c59..005c45db699eecc0fef93fca0832b91633dd5d8a 100644 (file)
@@ -196,6 +196,9 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
        entry->file_offset = file_offset;
        entry->start = start;
        entry->len = len;
+       if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
+           !(type == BTRFS_ORDERED_NOCOW))
+               entry->csum_bytes_left = disk_len;
        entry->disk_len = disk_len;
        entry->bytes_left = len;
        entry->inode = igrab(inode);
@@ -213,6 +216,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
        INIT_LIST_HEAD(&entry->root_extent_list);
        INIT_LIST_HEAD(&entry->work_list);
        init_completion(&entry->completion);
+       INIT_LIST_HEAD(&entry->log_list);
 
        trace_btrfs_ordered_extent_add(inode, entry);
 
@@ -270,6 +274,10 @@ void btrfs_add_ordered_sum(struct inode *inode,
        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        list_add_tail(&sum->list, &entry->list);
+       WARN_ON(entry->csum_bytes_left < sum->len);
+       entry->csum_bytes_left -= sum->len;
+       if (entry->csum_bytes_left == 0)
+               wake_up(&entry->wait);
        spin_unlock_irq(&tree->lock);
 }
 
@@ -405,6 +413,66 @@ out:
        return ret == 0;
 }
 
+/* Needs to either be called under a log transaction or the log_mutex */
+void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode)
+{
+       struct btrfs_ordered_inode_tree *tree;
+       struct btrfs_ordered_extent *ordered;
+       struct rb_node *n;
+       int index = log->log_transid % 2;
+
+       tree = &BTRFS_I(inode)->ordered_tree;
+       spin_lock_irq(&tree->lock);
+       for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
+               ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
+               spin_lock(&log->log_extents_lock[index]);
+               if (list_empty(&ordered->log_list)) {
+                       list_add_tail(&ordered->log_list, &log->logged_list[index]);
+                       atomic_inc(&ordered->refs);
+               }
+               spin_unlock(&log->log_extents_lock[index]);
+       }
+       spin_unlock_irq(&tree->lock);
+}
+
+void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
+{
+       struct btrfs_ordered_extent *ordered;
+       int index = transid % 2;
+
+       spin_lock_irq(&log->log_extents_lock[index]);
+       while (!list_empty(&log->logged_list[index])) {
+               ordered = list_first_entry(&log->logged_list[index],
+                                          struct btrfs_ordered_extent,
+                                          log_list);
+               list_del_init(&ordered->log_list);
+               spin_unlock_irq(&log->log_extents_lock[index]);
+               wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
+                                                  &ordered->flags));
+               btrfs_put_ordered_extent(ordered);
+               spin_lock_irq(&log->log_extents_lock[index]);
+       }
+       spin_unlock_irq(&log->log_extents_lock[index]);
+}
+
+void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
+{
+       struct btrfs_ordered_extent *ordered;
+       int index = transid % 2;
+
+       spin_lock_irq(&log->log_extents_lock[index]);
+       while (!list_empty(&log->logged_list[index])) {
+               ordered = list_first_entry(&log->logged_list[index],
+                                          struct btrfs_ordered_extent,
+                                          log_list);
+               list_del_init(&ordered->log_list);
+               spin_unlock_irq(&log->log_extents_lock[index]);
+               btrfs_put_ordered_extent(ordered);
+               spin_lock_irq(&log->log_extents_lock[index]);
+       }
+       spin_unlock_irq(&log->log_extents_lock[index]);
+}
+
 /*
  * used to drop a reference on an ordered extent.  This will free
  * the extent if the last reference is dropped
@@ -489,6 +557,7 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
        INIT_LIST_HEAD(&splice);
        INIT_LIST_HEAD(&works);
 
+       mutex_lock(&root->fs_info->ordered_operations_mutex);
        spin_lock(&root->fs_info->ordered_extent_lock);
        list_splice_init(&root->fs_info->ordered_extents, &splice);
        while (!list_empty(&splice)) {
@@ -532,6 +601,7 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
 
                cond_resched();
        }
+       mutex_unlock(&root->fs_info->ordered_operations_mutex);
 }
 
 /*
@@ -544,10 +614,12 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
  * extra check to make sure the ordered operation list really is empty
  * before we return
  */
-int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
+int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
+                                struct btrfs_root *root, int wait)
 {
        struct btrfs_inode *btrfs_inode;
        struct inode *inode;
+       struct btrfs_transaction *cur_trans = trans->transaction;
        struct list_head splice;
        struct list_head works;
        struct btrfs_delalloc_work *work, *next;
@@ -558,14 +630,10 @@ int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
 
        mutex_lock(&root->fs_info->ordered_operations_mutex);
        spin_lock(&root->fs_info->ordered_extent_lock);
-again:
-       list_splice_init(&root->fs_info->ordered_operations, &splice);
-
+       list_splice_init(&cur_trans->ordered_operations, &splice);
        while (!list_empty(&splice)) {
-
                btrfs_inode = list_entry(splice.next, struct btrfs_inode,
                                   ordered_operations);
-
                inode = &btrfs_inode->vfs_inode;
 
                list_del_init(&btrfs_inode->ordered_operations);
@@ -574,24 +642,22 @@ again:
                 * the inode may be getting freed (in sys_unlink path).
                 */
                inode = igrab(inode);
-
-               if (!wait && inode) {
-                       list_add_tail(&BTRFS_I(inode)->ordered_operations,
-                             &root->fs_info->ordered_operations);
-               }
-
                if (!inode)
                        continue;
+
+               if (!wait)
+                       list_add_tail(&BTRFS_I(inode)->ordered_operations,
+                                     &cur_trans->ordered_operations);
                spin_unlock(&root->fs_info->ordered_extent_lock);
 
                work = btrfs_alloc_delalloc_work(inode, wait, 1);
                if (!work) {
+                       spin_lock(&root->fs_info->ordered_extent_lock);
                        if (list_empty(&BTRFS_I(inode)->ordered_operations))
                                list_add_tail(&btrfs_inode->ordered_operations,
                                              &splice);
-                       spin_lock(&root->fs_info->ordered_extent_lock);
                        list_splice_tail(&splice,
-                                        &root->fs_info->ordered_operations);
+                                        &cur_trans->ordered_operations);
                        spin_unlock(&root->fs_info->ordered_extent_lock);
                        ret = -ENOMEM;
                        goto out;
@@ -603,9 +669,6 @@ again:
                cond_resched();
                spin_lock(&root->fs_info->ordered_extent_lock);
        }
-       if (wait && !list_empty(&root->fs_info->ordered_operations))
-               goto again;
-
        spin_unlock(&root->fs_info->ordered_extent_lock);
 out:
        list_for_each_entry_safe(work, next, &works, list) {
@@ -836,9 +899,16 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
         * if the disk i_size is already at the inode->i_size, or
         * this ordered extent is inside the disk i_size, we're done
         */
-       if (disk_i_size == i_size || offset <= disk_i_size) {
+       if (disk_i_size == i_size)
+               goto out;
+
+       /*
+        * We still need to update disk_i_size if outstanding_isize is greater
+        * than disk_i_size.
+        */
+       if (offset <= disk_i_size &&
+           (!ordered || ordered->outstanding_isize <= disk_i_size))
                goto out;
-       }
 
        /*
         * walk backward from this ordered extent to disk_i_size.
@@ -870,7 +940,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                        break;
                if (test->file_offset >= i_size)
                        break;
-               if (test->file_offset >= disk_i_size) {
+               if (entry_end(test) > disk_i_size) {
                        /*
                         * we don't update disk_i_size now, so record this
                         * undealt i_size. Or we will not know the real
@@ -967,6 +1037,7 @@ out:
 void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root, struct inode *inode)
 {
+       struct btrfs_transaction *cur_trans = trans->transaction;
        u64 last_mod;
 
        last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);
@@ -981,7 +1052,7 @@ void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
        spin_lock(&root->fs_info->ordered_extent_lock);
        if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
                list_add_tail(&BTRFS_I(inode)->ordered_operations,
-                             &root->fs_info->ordered_operations);
+                             &cur_trans->ordered_operations);
        }
        spin_unlock(&root->fs_info->ordered_extent_lock);
 }