Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 7817ca7c2bbf623241acdf265426f381c361d32f..28dd8eeea6a93f2ce5ae727777ede13582e2038f 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -112,7 +112,7 @@ static int ext4_split_extent_at(handle_t *handle,
                             int flags);
 
 static int ext4_find_delayed_extent(struct inode *inode,
-                                   struct ext4_ext_cache *newex);
+                                   struct extent_status *newes);
 
 static int ext4_ext_truncate_extend_restart(handle_t *handle,
                                            struct inode *inode,
@@ -714,7 +714,6 @@ int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
        eh->eh_magic = EXT4_EXT_MAGIC;
        eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
        ext4_mark_inode_dirty(handle, inode);
-       ext4_ext_invalidate_cache(inode);
        return 0;
 }
 
@@ -725,6 +724,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
        struct ext4_extent_header *eh;
        struct buffer_head *bh;
        short int depth, i, ppos = 0, alloc = 0;
+       int ret;
 
        eh = ext_inode_hdr(inode);
        depth = ext_depth(inode);
@@ -752,12 +752,15 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                path[ppos].p_ext = NULL;
 
                bh = sb_getblk(inode->i_sb, path[ppos].p_block);
-               if (unlikely(!bh))
+               if (unlikely(!bh)) {
+                       ret = -ENOMEM;
                        goto err;
+               }
                if (!bh_uptodate_or_lock(bh)) {
                        trace_ext4_ext_load_extent(inode, block,
                                                path[ppos].p_block);
-                       if (bh_submit_read(bh) < 0) {
+                       ret = bh_submit_read(bh);
+                       if (ret < 0) {
                                put_bh(bh);
                                goto err;
                        }
@@ -768,13 +771,15 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
                        put_bh(bh);
                        EXT4_ERROR_INODE(inode,
                                         "ppos %d > depth %d", ppos, depth);
+                       ret = -EIO;
                        goto err;
                }
                path[ppos].p_bh = bh;
                path[ppos].p_hdr = eh;
                i--;
 
-               if (ext4_ext_check_block(inode, eh, i, bh))
+               ret = ext4_ext_check_block(inode, eh, i, bh);
+               if (ret < 0)
                        goto err;
        }
 
@@ -796,7 +801,7 @@ err:
        ext4_ext_drop_refs(path);
        if (alloc)
                kfree(path);
-       return ERR_PTR(-EIO);
+       return ERR_PTR(ret);
 }
 
 /*
@@ -950,8 +955,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                goto cleanup;
        }
        bh = sb_getblk(inode->i_sb, newblock);
-       if (!bh) {
-               err = -EIO;
+       if (unlikely(!bh)) {
+               err = -ENOMEM;
                goto cleanup;
        }
        lock_buffer(bh);
@@ -1023,8 +1028,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                oldblock = newblock;
                newblock = ablocks[--a];
                bh = sb_getblk(inode->i_sb, newblock);
-               if (!bh) {
-                       err = -EIO;
+               if (unlikely(!bh)) {
+                       err = -ENOMEM;
                        goto cleanup;
                }
                lock_buffer(bh);
@@ -1136,11 +1141,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                return err;
 
        bh = sb_getblk(inode->i_sb, newblock);
-       if (!bh) {
-               err = -EIO;
-               ext4_std_error(inode->i_sb, err);
-               return err;
-       }
+       if (unlikely(!bh))
+               return -ENOMEM;
        lock_buffer(bh);
 
        err = ext4_journal_get_create_access(handle, bh);
@@ -1960,7 +1962,6 @@ cleanup:
                ext4_ext_drop_refs(npath);
                kfree(npath);
        }
-       ext4_ext_invalidate_cache(inode);
        return err;
 }
 
@@ -1969,8 +1970,8 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
                                    struct fiemap_extent_info *fieinfo)
 {
        struct ext4_ext_path *path = NULL;
-       struct ext4_ext_cache newex;
        struct ext4_extent *ex;
+       struct extent_status es;
        ext4_lblk_t next, next_del, start = 0, end = 0;
        ext4_lblk_t last = block + num;
        int exists, depth = 0, err = 0;
@@ -2044,37 +2045,47 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
                BUG_ON(end <= start);
 
                if (!exists) {
-                       newex.ec_block = start;
-                       newex.ec_len = end - start;
-                       newex.ec_start = 0;
+                       es.es_lblk = start;
+                       es.es_len = end - start;
+                       es.es_pblk = 0;
                } else {
-                       newex.ec_block = le32_to_cpu(ex->ee_block);
-                       newex.ec_len = ext4_ext_get_actual_len(ex);
-                       newex.ec_start = ext4_ext_pblock(ex);
+                       es.es_lblk = le32_to_cpu(ex->ee_block);
+                       es.es_len = ext4_ext_get_actual_len(ex);
+                       es.es_pblk = ext4_ext_pblock(ex);
                        if (ext4_ext_is_uninitialized(ex))
                                flags |= FIEMAP_EXTENT_UNWRITTEN;
                }
 
                /*
-                * Find delayed extent and update newex accordingly. We call
-                * it even in !exists case to find out whether newex is the
+                * Find delayed extent and update es accordingly. We call
+                * it even in !exists case to find out whether es is the
                 * last existing extent or not.
                 */
-               next_del = ext4_find_delayed_extent(inode, &newex);
+               next_del = ext4_find_delayed_extent(inode, &es);
                if (!exists && next_del) {
                        exists = 1;
                        flags |= FIEMAP_EXTENT_DELALLOC;
                }
                up_read(&EXT4_I(inode)->i_data_sem);
 
-               if (unlikely(newex.ec_len == 0)) {
-                       EXT4_ERROR_INODE(inode, "newex.ec_len == 0");
+               if (unlikely(es.es_len == 0)) {
+                       EXT4_ERROR_INODE(inode, "es.es_len == 0");
                        err = -EIO;
                        break;
                }
 
-               /* This is possible iff next == next_del == EXT_MAX_BLOCKS */
-               if (next == next_del) {
+               /*
+                * This is possible iff next == next_del == EXT_MAX_BLOCKS.
+                * We need to check next == EXT_MAX_BLOCKS because an
+                * extent can carry both the unwritten and the delayed
+                * status at the same time: when a delayed-allocated range
+                * is later allocated by fallocate, the extent status tree
+                * tracks both states in a single extent.
+                *
+                * So we could return an unwritten and delayed extent
+                * whose start block is equal to 'next'.
+                */
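+               /*
+                * (Illustrative example, not part of the original patch:
+                * when the extent following this one was fallocated over a
+                * delalloc range, it is tracked as unwritten in the extent
+                * tree and as delayed in the status tree, so the lookup
+                * above can report next_del == next even though next is
+                * not EXT_MAX_BLOCKS.)
+                */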
+               if (next == next_del && next == EXT_MAX_BLOCKS) {
                        flags |= FIEMAP_EXTENT_LAST;
                        if (unlikely(next_del != EXT_MAX_BLOCKS ||
                                     next != EXT_MAX_BLOCKS)) {
@@ -2089,9 +2100,9 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
 
                if (exists) {
                        err = fiemap_fill_next_extent(fieinfo,
-                               (__u64)newex.ec_block << blksize_bits,
-                               (__u64)newex.ec_start << blksize_bits,
-                               (__u64)newex.ec_len << blksize_bits,
+                               (__u64)es.es_lblk << blksize_bits,
+                               (__u64)es.es_pblk << blksize_bits,
+                               (__u64)es.es_len << blksize_bits,
                                flags);
                        if (err < 0)
                                break;
@@ -2101,7 +2112,7 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
                        }
                }
 
-               block = newex.ec_block + newex.ec_len;
+               block = es.es_lblk + es.es_len;
        }
 
        if (path) {
@@ -2112,21 +2123,6 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
        return err;
 }
 
-static void
-ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
-                       __u32 len, ext4_fsblk_t start)
-{
-       struct ext4_ext_cache *cex;
-       BUG_ON(len == 0);
-       spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
-       trace_ext4_ext_put_in_cache(inode, block, len, start);
-       cex = &EXT4_I(inode)->i_cached_extent;
-       cex->ec_block = block;
-       cex->ec_len = len;
-       cex->ec_start = start;
-       spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
-}
-
 /*
  * ext4_ext_put_gap_in_cache:
  * calculate boundaries of the gap that the requested block fits into
@@ -2143,9 +2139,10 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
 
        ex = path[depth].p_ext;
        if (ex == NULL) {
-               /* there is no extent yet, so gap is [0;-] */
-               lblock = 0;
-               len = EXT_MAX_BLOCKS;
+               /*
+                * there is no extent yet, so gap is [0;-] and we
+                * don't cache it
+                */
                ext_debug("cache gap(whole file):");
        } else if (block < le32_to_cpu(ex->ee_block)) {
                lblock = block;
@@ -2154,6 +2151,9 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
                                block,
                                le32_to_cpu(ex->ee_block),
                                 ext4_ext_get_actual_len(ex));
+               if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
+                       ext4_es_insert_extent(inode, lblock, len, ~0,
+                                             EXTENT_STATUS_HOLE);
        } else if (block >= le32_to_cpu(ex->ee_block)
                        + ext4_ext_get_actual_len(ex)) {
                ext4_lblk_t next;
@@ -2167,58 +2167,15 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
                                block);
                BUG_ON(next == lblock);
                len = next - lblock;
+               if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
+                       ext4_es_insert_extent(inode, lblock, len, ~0,
+                                             EXTENT_STATUS_HOLE);
        } else {
                lblock = len = 0;
                BUG();
        }
 
        ext_debug(" -> %u:%lu\n", lblock, len);
-       ext4_ext_put_in_cache(inode, lblock, len, 0);
-}
-
-/*
- * ext4_ext_in_cache()
- * Checks to see if the given block is in the cache.
- * If it is, the cached extent is stored in the given
- * cache extent pointer.
- *
- * @inode: The files inode
- * @block: The block to look for in the cache
- * @ex:    Pointer where the cached extent will be stored
- *         if it contains block
- *
- * Return 0 if cache is invalid; 1 if the cache is valid
- */
-static int
-ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
-                 struct ext4_extent *ex)
-{
-       struct ext4_ext_cache *cex;
-       int ret = 0;
-
-       /*
-        * We borrow i_block_reservation_lock to protect i_cached_extent
-        */
-       spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
-       cex = &EXT4_I(inode)->i_cached_extent;
-
-       /* has cache valid data? */
-       if (cex->ec_len == 0)
-               goto errout;
-
-       if (in_range(block, cex->ec_block, cex->ec_len)) {
-               ex->ee_block = cpu_to_le32(cex->ec_block);
-               ext4_ext_store_pblock(ex, cex->ec_start);
-               ex->ee_len = cpu_to_le16(cex->ec_len);
-               ext_debug("%u cached by %u:%u:%llu\n",
-                               block,
-                               cex->ec_block, cex->ec_len, cex->ec_start);
-               ret = 1;
-       }
-errout:
-       trace_ext4_ext_in_cache(inode, block, ret);
-       spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
-       return ret;
 }
 
 /*
@@ -2653,13 +2610,11 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
        ext_debug("truncate since %u to %u\n", start, end);
 
        /* probably first extent we're gonna free will be last in block */
-       handle = ext4_journal_start(inode, depth + 1);
+       handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);
 
 again:
-       ext4_ext_invalidate_cache(inode);
-
        trace_ext4_ext_remove_space(inode, start, depth);
 
        /*
@@ -3519,19 +3474,19 @@ out:
  *
  * Return 1 if there is a delalloc block in the range, otherwise 0.
  */
-static int ext4_find_delalloc_range(struct inode *inode,
-                                   ext4_lblk_t lblk_start,
-                                   ext4_lblk_t lblk_end)
+int ext4_find_delalloc_range(struct inode *inode,
+                            ext4_lblk_t lblk_start,
+                            ext4_lblk_t lblk_end)
 {
        struct extent_status es;
 
-       es.start = lblk_start;
-       ext4_es_find_extent(inode, &es);
-       if (es.len == 0)
+       ext4_es_find_delayed_extent(inode, lblk_start, &es);
+       if (es.es_len == 0)
                return 0; /* there is no delay extent in this tree */
-       else if (es.start <= lblk_start && lblk_start < es.start + es.len)
+       else if (es.es_lblk <= lblk_start &&
+                lblk_start < es.es_lblk + es.es_len)
                return 1;
-       else if (lblk_start <= es.start && es.start <= lblk_end)
+       else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
                return 1;
        else
                return 0;
@@ -3656,6 +3611,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
                        ext4_set_io_unwritten_flag(inode, io);
                else
                        ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
+               map->m_flags |= EXT4_MAP_UNWRITTEN;
                if (ext4_should_dioread_nolock(inode))
                        map->m_flags |= EXT4_MAP_UNINIT;
                goto out;
@@ -3677,8 +3633,10 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
         * repeat fallocate creation request
         * we already have an unwritten extent
         */
-       if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
+       if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
+               map->m_flags |= EXT4_MAP_UNWRITTEN;
                goto map_out;
+       }
 
        /* buffered READ or buffered write_begin() lookup */
        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
@@ -3898,35 +3856,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                  map->m_lblk, map->m_len, inode->i_ino);
        trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
 
-       /* check in cache */
-       if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
-               if (!newex.ee_start_lo && !newex.ee_start_hi) {
-                       if ((sbi->s_cluster_ratio > 1) &&
-                           ext4_find_delalloc_cluster(inode, map->m_lblk))
-                               map->m_flags |= EXT4_MAP_FROM_CLUSTER;
-
-                       if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
-                               /*
-                                * block isn't allocated yet and
-                                * user doesn't want to allocate it
-                                */
-                               goto out2;
-                       }
-                       /* we should allocate requested block */
-               } else {
-                       /* block is already allocated */
-                       if (sbi->s_cluster_ratio > 1)
-                               map->m_flags |= EXT4_MAP_FROM_CLUSTER;
-                       newblock = map->m_lblk
-                                  - le32_to_cpu(newex.ee_block)
-                                  + ext4_ext_pblock(&newex);
-                       /* number of remaining blocks in the extent */
-                       allocated = ext4_ext_get_actual_len(&newex) -
-                               (map->m_lblk - le32_to_cpu(newex.ee_block));
-                       goto out;
-               }
-       }
-
        /* find extent for this block */
        path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
        if (IS_ERR(path)) {
@@ -3973,15 +3902,9 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                        ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
                                  ee_block, ee_len, newblock);
 
-                       /*
-                        * Do not put uninitialized extent
-                        * in the cache
-                        */
-                       if (!ext4_ext_is_uninitialized(ex)) {
-                               ext4_ext_put_in_cache(inode, ee_block,
-                                       ee_len, ee_start);
+                       if (!ext4_ext_is_uninitialized(ex))
                                goto out;
-                       }
+
                        allocated = ext4_ext_handle_uninitialized_extents(
                                handle, inode, map, path, flags,
                                allocated, newblock);
@@ -4002,7 +3925,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                 * put just found gap into cache to speed up
                 * subsequent requests
                 */
-               ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
+               if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0)
+                       ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
                goto out2;
        }
 
@@ -4108,6 +4032,7 @@ got_allocated_blocks:
        /* Mark uninitialized */
        if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){
                ext4_ext_mark_uninitialized(&newex);
+               map->m_flags |= EXT4_MAP_UNWRITTEN;
                /*
                 * io_end structure was created for every IO write to an
                 * uninitialized extent. To avoid unnecessary conversion,
@@ -4241,10 +4166,9 @@ got_allocated_blocks:
         * Cache the extent and update transaction to commit on fdatasync only
         * when it is _not_ an uninitialized extent.
         */
-       if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
-               ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
+       if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0)
                ext4_update_inode_fsync_trans(handle, inode, 1);
-       } else
+       else
                ext4_update_inode_fsync_trans(handle, inode, 0);
 out:
        if (allocated > map->m_len)
@@ -4284,7 +4208,7 @@ void ext4_ext_truncate(struct inode *inode)
         * probably first extent we're gonna free will be last in block
         */
        err = ext4_writepage_trans_blocks(inode);
-       handle = ext4_journal_start(inode, err);
+       handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, err);
        if (IS_ERR(handle))
                return;
 
@@ -4303,7 +4227,6 @@ void ext4_ext_truncate(struct inode *inode)
                goto out_stop;
 
        down_write(&EXT4_I(inode)->i_data_sem);
-       ext4_ext_invalidate_cache(inode);
 
        ext4_discard_preallocations(inode);
 
@@ -4397,13 +4320,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        struct ext4_map_blocks map;
        unsigned int credits, blkbits = inode->i_blkbits;
 
-       /*
-        * currently supporting (pre)allocate mode for extent-based
-        * files _only_
-        */
-       if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
-               return -EOPNOTSUPP;
-
        /* Return error if mode is not supported */
        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;
@@ -4415,6 +4331,13 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        if (ret)
                return ret;
 
+       /*
+        * currently supporting (pre)allocate mode for extent-based
+        * files _only_
+        */
+       if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
+               return -EOPNOTSUPP;
+
        trace_ext4_fallocate_enter(inode, offset, len, mode);
        map.m_lblk = offset >> blkbits;
        /*
@@ -4451,7 +4374,8 @@ retry:
        while (ret >= 0 && ret < max_blocks) {
                map.m_lblk = map.m_lblk + ret;
                map.m_len = max_blocks = max_blocks - ret;
-               handle = ext4_journal_start(inode, credits);
+               handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
+                                           credits);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        break;
@@ -4459,11 +4383,11 @@ retry:
                ret = ext4_map_blocks(handle, inode, &map, flags);
                if (ret <= 0) {
 #ifdef EXT4FS_DEBUG
-                       WARN_ON(ret <= 0);
-                       printk(KERN_ERR "%s: ext4_ext_map_blocks "
-                                   "returned error inode#%lu, block=%u, "
-                                   "max_blocks=%u", __func__,
-                                   inode->i_ino, map.m_lblk, max_blocks);
+                       ext4_warning(inode->i_sb,
+                                    "inode #%lu: block %u: len %u: "
+                                    "ext4_ext_map_blocks returned %d",
+                                    inode->i_ino, map.m_lblk,
+                                    map.m_len, ret);
 #endif
                        ext4_mark_inode_dirty(handle, inode);
                        ret2 = ext4_journal_stop(handle);
@@ -4529,21 +4453,19 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
        while (ret >= 0 && ret < max_blocks) {
                map.m_lblk += ret;
                map.m_len = (max_blocks -= ret);
-               handle = ext4_journal_start(inode, credits);
+               handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        break;
                }
                ret = ext4_map_blocks(handle, inode, &map,
                                      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
-               if (ret <= 0) {
-                       WARN_ON(ret <= 0);
-                       ext4_msg(inode->i_sb, KERN_ERR,
-                                "%s:%d: inode #%lu: block %u: len %u: "
-                                "ext4_ext_map_blocks returned %d",
-                                __func__, __LINE__, inode->i_ino, map.m_lblk,
-                                map.m_len, ret);
-               }
+               if (ret <= 0)
+                       ext4_warning(inode->i_sb,
+                                    "inode #%lu: block %u: len %u: "
+                                    "ext4_ext_map_blocks returned %d",
+                                    inode->i_ino, map.m_lblk,
+                                    map.m_len, ret);
                ext4_mark_inode_dirty(handle, inode);
                ret2 = ext4_journal_stop(handle);
                if (ret <= 0 || ret2 )
@@ -4553,42 +4475,48 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
 }
 
 /*
- * If newex is not existing extent (newex->ec_start equals zero) find
- * delayed extent at start of newex and update newex accordingly and
+ * If newes is not an existing extent (newes->es_pblk equals zero) find
+ * delayed extent at start of newes and update newes accordingly and
  * return start of the next delayed extent.
  *
- * If newex is existing extent (newex->ec_start is not equal zero)
+ * If newes is an existing extent (newes->es_pblk is not zero)
  * return start of next delayed extent or EXT_MAX_BLOCKS if no delayed
- * extent found. Leave newex unmodified.
+ * extent found. Leave newes unmodified.
  */
 static int ext4_find_delayed_extent(struct inode *inode,
-                                   struct ext4_ext_cache *newex)
+                                   struct extent_status *newes)
 {
        struct extent_status es;
-       ext4_lblk_t next_del;
+       ext4_lblk_t block, next_del;
 
-       es.start = newex->ec_block;
-       next_del = ext4_es_find_extent(inode, &es);
+       ext4_es_find_delayed_extent(inode, newes->es_lblk, &es);
 
-       if (newex->ec_start == 0) {
+       if (newes->es_pblk == 0) {
                /*
-                * No extent in extent-tree contains block @newex->ec_start,
+                * No extent in extent-tree contains block @newes->es_pblk,
                 * then the block may stay in 1)a hole or 2)delayed-extent.
                 */
-               if (es.len == 0)
+               if (es.es_len == 0)
                        /* A hole found. */
                        return 0;
 
-               if (es.start > newex->ec_block) {
+               if (es.es_lblk > newes->es_lblk) {
                        /* A hole found. */
-                       newex->ec_len = min(es.start - newex->ec_block,
-                                           newex->ec_len);
+                       newes->es_len = min(es.es_lblk - newes->es_lblk,
+                                           newes->es_len);
                        return 0;
                }
 
-               newex->ec_len = es.start + es.len - newex->ec_block;
+               newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
        }
 
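+       /*
+        * Look up the first delayed extent past the end of newes; its
+        * start block (or EXT_MAX_BLOCKS if there is none) is returned
+        * as the start of the next delayed extent.
+        */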
+       block = newes->es_lblk + newes->es_len;
+       ext4_es_find_delayed_extent(inode, block, &es);
+       if (es.es_len == 0)
+               next_del = EXT_MAX_BLOCKS;
+       else
+               next_del = es.es_lblk;
+
        return next_del;
 }
 /* fiemap flags we can handle specified here */
@@ -4709,7 +4637,7 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
        inode_dio_wait(inode);
 
        credits = ext4_writepage_trans_blocks(inode);
-       handle = ext4_journal_start(inode, credits);
+       handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
                goto out_dio;
@@ -4786,14 +4714,12 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
                goto out;
 
        down_write(&EXT4_I(inode)->i_data_sem);
-       ext4_ext_invalidate_cache(inode);
        ext4_discard_preallocations(inode);
 
        err = ext4_es_remove_extent(inode, first_block,
                                    stop_block - first_block);
        err = ext4_ext_remove_space(inode, first_block, stop_block - 1);
 
-       ext4_ext_invalidate_cache(inode);
        ext4_discard_preallocations(inode);
 
        if (IS_SYNC(inode))