1 /*
2  * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
3  * Written by Alex Tomas <alex@clusterfs.com>
4  *
5  * Architecture independence:
6  *   Copyright (c) 2005, Bull S.A.
7  *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program; if not, write to the Free Software
20  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307, USA.
21  */
22
23 /*
24  * Extents support for EXT4
25  *
26  * TODO:
27  *   - ext4*_error() should be used in some situations
28  *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
29  *   - smart tree reduction
30  */
31
32 #include <linux/fs.h>
33 #include <linux/time.h>
34 #include <linux/jbd2.h>
35 #include <linux/highuid.h>
36 #include <linux/pagemap.h>
37 #include <linux/quotaops.h>
38 #include <linux/string.h>
39 #include <linux/slab.h>
40 #include <asm/uaccess.h>
41 #include <linux/fiemap.h>
42 #include "ext4_jbd2.h"
43 #include "ext4_extents.h"
44 #include "xattr.h"
45
46 #include <trace/events/ext4.h>
47
48 /*
49  * flags used by extent splitting.
50  */
51 #define EXT4_EXT_MAY_ZEROOUT    0x1  /* safe to zeroout if split fails
52                                         due to ENOSPC */
53 #define EXT4_EXT_MARK_UNINIT1   0x2  /* mark first half uninitialized */
54 #define EXT4_EXT_MARK_UNINIT2   0x4  /* mark second half uninitialized */
55
56 #define EXT4_EXT_DATA_VALID1    0x8  /* first half contains valid data */
57 #define EXT4_EXT_DATA_VALID2    0x10 /* second half contains valid data */
58
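/*
 * For example, a caller splitting an uninitialized extent might combine
 * these flags roughly as follows (an illustrative sketch; the actual
 * combinations are chosen by the callers of ext4_split_extent_at(),
 * declared below):
 *
 *	split_flag = EXT4_EXT_MAY_ZEROOUT |	(zeroout on ENOSPC)
 *		     EXT4_EXT_MARK_UNINIT2;	(second half stays uninitialized)
 */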
59 static __le32 ext4_extent_block_csum(struct inode *inode,
60                                      struct ext4_extent_header *eh)
61 {
62         struct ext4_inode_info *ei = EXT4_I(inode);
63         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
64         __u32 csum;
65
66         csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
67                            EXT4_EXTENT_TAIL_OFFSET(eh));
68         return cpu_to_le32(csum);
69 }
70
71 static int ext4_extent_block_csum_verify(struct inode *inode,
72                                          struct ext4_extent_header *eh)
73 {
74         struct ext4_extent_tail *et;
75
76         if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
77                 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
78                 return 1;
79
80         et = find_ext4_extent_tail(eh);
81         if (et->et_checksum != ext4_extent_block_csum(inode, eh))
82                 return 0;
83         return 1;
84 }
85
86 static void ext4_extent_block_csum_set(struct inode *inode,
87                                        struct ext4_extent_header *eh)
88 {
89         struct ext4_extent_tail *et;
90
91         if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
92                 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
93                 return;
94
95         et = find_ext4_extent_tail(eh);
96         et->et_checksum = ext4_extent_block_csum(inode, eh);
97 }
98
99 static int ext4_split_extent(handle_t *handle,
100                                 struct inode *inode,
101                                 struct ext4_ext_path *path,
102                                 struct ext4_map_blocks *map,
103                                 int split_flag,
104                                 int flags);
105
106 static int ext4_split_extent_at(handle_t *handle,
107                              struct inode *inode,
108                              struct ext4_ext_path *path,
109                              ext4_lblk_t split,
110                              int split_flag,
111                              int flags);
112
113 static int ext4_find_delayed_extent(struct inode *inode,
114                                     struct extent_status *newes);
115
116 static int ext4_ext_truncate_extend_restart(handle_t *handle,
117                                             struct inode *inode,
118                                             int needed)
119 {
120         int err;
121
122         if (!ext4_handle_valid(handle))
123                 return 0;
124         if (handle->h_buffer_credits > needed)
125                 return 0;
126         err = ext4_journal_extend(handle, needed);
127         if (err <= 0)
128                 return err;
129         err = ext4_truncate_restart_trans(handle, inode, needed);
130         if (err == 0)
131                 err = -EAGAIN;
132
133         return err;
134 }
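
/*
 * An illustrative caller pattern for the helper above (a sketch, not a
 * verbatim caller): -EAGAIN signals that the transaction was restarted,
 * so any cached extent path must be revalidated before retrying.
 *
 *	err = ext4_ext_truncate_extend_restart(handle, inode, credits);
 *	if (err == -EAGAIN)
 *		goto again;	(re-read the extent path, then retry)
 *	else if (err)
 *		goto out;
 */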
135
136 /*
137  * could return:
138  *  - EROFS
139  *  - ENOMEM
140  */
141 static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
142                                 struct ext4_ext_path *path)
143 {
144         if (path->p_bh) {
145                 /* path points to block */
146                 return ext4_journal_get_write_access(handle, path->p_bh);
147         }
148         /* path points to leaf/index in inode body */
149         /* we use in-core data, no need to protect them */
150         return 0;
151 }
152
153 /*
154  * could return:
155  *  - EROFS
156  *  - ENOMEM
157  *  - EIO
158  */
159 int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
160                      struct inode *inode, struct ext4_ext_path *path)
161 {
162         int err;
163         if (path->p_bh) {
164                 ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
165                 /* path points to block */
166                 err = __ext4_handle_dirty_metadata(where, line, handle,
167                                                    inode, path->p_bh);
168         } else {
169                 /* path points to leaf/index in inode body */
170                 err = ext4_mark_inode_dirty(handle, inode);
171         }
172         return err;
173 }
174
175 static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
176                               struct ext4_ext_path *path,
177                               ext4_lblk_t block)
178 {
179         if (path) {
180                 int depth = path->p_depth;
181                 struct ext4_extent *ex;
182
183                 /*
184                  * Try to predict block placement assuming that we are
185                  * filling in a file which will eventually be
186                  * non-sparse --- i.e., in the case of libbfd writing
187                  * an ELF object's sections out-of-order but in a way
188                  * that eventually results in a contiguous object or
189                  * executable file, or some database extending a table
190                  * space file.  However, this is actually somewhat
191                  * non-ideal if we are writing a sparse file such as
192                  * qemu or KVM writing a raw image file that is going
193                  * to stay fairly sparse, since it will end up
194                  * fragmenting the file system's free space.  Maybe we
195                  * should have some heuristics or some way to allow
196                  * userspace to pass a hint to the file system,
197                  * especially if the latter case turns out to be
198                  * common.
199                  */
200                 ex = path[depth].p_ext;
201                 if (ex) {
202                         ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
203                         ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
204
205                         if (block > ext_block)
206                                 return ext_pblk + (block - ext_block);
207                         else
208                                 return ext_pblk - (ext_block - block);
209                 }
210
211                 /* it looks like the index is empty;
212                  * try to find the starting block from the index itself */
213                 if (path[depth].p_bh)
214                         return path[depth].p_bh->b_blocknr;
215         }
216
217         /* OK. use inode's group */
218         return ext4_inode_to_goal_block(inode);
219 }
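
/*
 * A worked example of the goal computation above (illustrative numbers):
 * if the path ends in an extent mapping logical block 100 to physical
 * block 5000 and we are allocating logical block 110, the goal is
 * 5000 + (110 - 100) = 5010, i.e. we try to stay contiguous with the
 * nearest known mapping.
 */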
220
221 /*
222  * Allocation for a metadata block
223  */
224 static ext4_fsblk_t
225 ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
226                         struct ext4_ext_path *path,
227                         struct ext4_extent *ex, int *err, unsigned int flags)
228 {
229         ext4_fsblk_t goal, newblock;
230
231         goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
232         newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
233                                         NULL, err);
234         return newblock;
235 }
236
237 static inline int ext4_ext_space_block(struct inode *inode, int check)
238 {
239         int size;
240
241         size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
242                         / sizeof(struct ext4_extent);
243 #ifdef AGGRESSIVE_TEST
244         if (!check && size > 6)
245                 size = 6;
246 #endif
247         return size;
248 }
249
250 static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
251 {
252         int size;
253
254         size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
255                         / sizeof(struct ext4_extent_idx);
256 #ifdef AGGRESSIVE_TEST
257         if (!check && size > 5)
258                 size = 5;
259 #endif
260         return size;
261 }
262
263 static inline int ext4_ext_space_root(struct inode *inode, int check)
264 {
265         int size;
266
267         size = sizeof(EXT4_I(inode)->i_data);
268         size -= sizeof(struct ext4_extent_header);
269         size /= sizeof(struct ext4_extent);
270 #ifdef AGGRESSIVE_TEST
271         if (!check && size > 3)
272                 size = 3;
273 #endif
274         return size;
275 }
276
277 static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
278 {
279         int size;
280
281         size = sizeof(EXT4_I(inode)->i_data);
282         size -= sizeof(struct ext4_extent_header);
283         size /= sizeof(struct ext4_extent_idx);
284 #ifdef AGGRESSIVE_TEST
285         if (!check && size > 4)
286                 size = 4;
287 #endif
288         return size;
289 }
290
291 /*
292  * Calculate the number of metadata blocks needed
293  * to allocate a new block at @lblock
294  * Worst case is one metadata block per tree level (depth + 1 in total)
295  */
296 int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
297 {
298         struct ext4_inode_info *ei = EXT4_I(inode);
299         int idxs;
300
301         idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
302                 / sizeof(struct ext4_extent_idx));
303
304         /*
305          * If the new delayed allocation block is contiguous with the
306          * previous da block, it can share index blocks with the
307          * previous block, so we only need to allocate a new index
308          * block every idxs leaf blocks.  At idxs**2 blocks, we need
309          * an additional index block, and at idxs**3 blocks, yet
310          * another index block.
311          */
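        /*
         * A worked example with illustrative numbers: with a 4k block
         * size, a 12-byte extent header and 12-byte index entries,
         * idxs = (4096 - 12) / 12 = 340, so a contiguous delayed
         * allocation needs one extra index block every 340 leaf blocks,
         * another one every 340^2 leaf blocks, and so on.
         */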
312         if (ei->i_da_metadata_calc_len &&
313             ei->i_da_metadata_calc_last_lblock+1 == lblock) {
314                 int num = 0;
315
316                 if ((ei->i_da_metadata_calc_len % idxs) == 0)
317                         num++;
318                 if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
319                         num++;
320                 if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
321                         num++;
322                         ei->i_da_metadata_calc_len = 0;
323                 } else
324                         ei->i_da_metadata_calc_len++;
325                 ei->i_da_metadata_calc_last_lblock++;
326                 return num;
327         }
328
329         /*
330          * In the worst case we need a new set of index blocks at
331          * every level of the inode's extent tree.
332          */
333         ei->i_da_metadata_calc_len = 1;
334         ei->i_da_metadata_calc_last_lblock = lblock;
335         return ext_depth(inode) + 1;
336 }
337
338 static int
339 ext4_ext_max_entries(struct inode *inode, int depth)
340 {
341         int max;
342
343         if (depth == ext_depth(inode)) {
344                 if (depth == 0)
345                         max = ext4_ext_space_root(inode, 1);
346                 else
347                         max = ext4_ext_space_root_idx(inode, 1);
348         } else {
349                 if (depth == 0)
350                         max = ext4_ext_space_block(inode, 1);
351                 else
352                         max = ext4_ext_space_block_idx(inode, 1);
353         }
354
355         return max;
356 }
357
358 static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
359 {
360         ext4_fsblk_t block = ext4_ext_pblock(ext);
361         int len = ext4_ext_get_actual_len(ext);
362         ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
363         ext4_lblk_t last = lblock + len - 1;
364
365         if (lblock > last)
366                 return 0;
367         return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
368 }
369
370 static int ext4_valid_extent_idx(struct inode *inode,
371                                 struct ext4_extent_idx *ext_idx)
372 {
373         ext4_fsblk_t block = ext4_idx_pblock(ext_idx);
374
375         return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
376 }
377
378 static int ext4_valid_extent_entries(struct inode *inode,
379                                 struct ext4_extent_header *eh,
380                                 int depth)
381 {
382         unsigned short entries;
383         if (eh->eh_entries == 0)
384                 return 1;
385
386         entries = le16_to_cpu(eh->eh_entries);
387
388         if (depth == 0) {
389                 /* leaf entries */
390                 struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
391                 struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
392                 ext4_fsblk_t pblock = 0;
393                 ext4_lblk_t lblock = 0;
394                 ext4_lblk_t prev = 0;
395                 int len = 0;
396                 while (entries) {
397                         if (!ext4_valid_extent(inode, ext))
398                                 return 0;
399
400                         /* Check for overlapping extents */
401                         lblock = le32_to_cpu(ext->ee_block);
402                         len = ext4_ext_get_actual_len(ext);
403                         if ((lblock <= prev) && prev) {
404                                 pblock = ext4_ext_pblock(ext);
405                                 es->s_last_error_block = cpu_to_le64(pblock);
406                                 return 0;
407                         }
408                         ext++;
409                         entries--;
410                         prev = lblock + len - 1;
411                 }
412         } else {
413                 struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
414                 while (entries) {
415                         if (!ext4_valid_extent_idx(inode, ext_idx))
416                                 return 0;
417                         ext_idx++;
418                         entries--;
419                 }
420         }
421         return 1;
422 }
423
424 static int __ext4_ext_check(const char *function, unsigned int line,
425                             struct inode *inode, struct ext4_extent_header *eh,
426                             int depth, ext4_fsblk_t pblk)
427 {
428         const char *error_msg;
429         int max = 0;
430
431         if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
432                 error_msg = "invalid magic";
433                 goto corrupted;
434         }
435         if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
436                 error_msg = "unexpected eh_depth";
437                 goto corrupted;
438         }
439         if (unlikely(eh->eh_max == 0)) {
440                 error_msg = "invalid eh_max";
441                 goto corrupted;
442         }
443         max = ext4_ext_max_entries(inode, depth);
444         if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
445                 error_msg = "too large eh_max";
446                 goto corrupted;
447         }
448         if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
449                 error_msg = "invalid eh_entries";
450                 goto corrupted;
451         }
452         if (!ext4_valid_extent_entries(inode, eh, depth)) {
453                 error_msg = "invalid extent entries";
454                 goto corrupted;
455         }
456         /* Verify checksum on non-root extent tree nodes */
457         if (ext_depth(inode) != depth &&
458             !ext4_extent_block_csum_verify(inode, eh)) {
459                 error_msg = "extent tree corrupted";
460                 goto corrupted;
461         }
462         return 0;
463
464 corrupted:
465         ext4_error_inode(inode, function, line, 0,
466                          "pblk %llu bad header/extent: %s - magic %x, "
467                          "entries %u, max %u(%u), depth %u(%u)",
468                          (unsigned long long) pblk, error_msg,
469                          le16_to_cpu(eh->eh_magic),
470                          le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
471                          max, le16_to_cpu(eh->eh_depth), depth);
472         return -EIO;
473 }
474
475 #define ext4_ext_check(inode, eh, depth, pblk)                  \
476         __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))
477
478 int ext4_ext_check_inode(struct inode *inode)
479 {
480         return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
481 }
482
483 static struct buffer_head *
484 __read_extent_tree_block(const char *function, unsigned int line,
485                          struct inode *inode, ext4_fsblk_t pblk, int depth,
486                          int flags)
487 {
488         struct buffer_head              *bh;
489         int                             err;
490
491         bh = sb_getblk(inode->i_sb, pblk);
492         if (unlikely(!bh))
493                 return ERR_PTR(-ENOMEM);
494
495         if (!bh_uptodate_or_lock(bh)) {
496                 trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
497                 err = bh_submit_read(bh);
498                 if (err < 0)
499                         goto errout;
500         }
501         if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
502                 return bh;
503         err = __ext4_ext_check(function, line, inode,
504                                ext_block_hdr(bh), depth, pblk);
505         if (err)
506                 goto errout;
507         set_buffer_verified(bh);
508         /*
509          * If this is a leaf block, cache all of its entries
510          */
511         if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
512                 struct ext4_extent_header *eh = ext_block_hdr(bh);
513                 struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
514                 ext4_lblk_t prev = 0;
515                 int i;
516
517                 for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
518                         unsigned int status = EXTENT_STATUS_WRITTEN;
519                         ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
520                         int len = ext4_ext_get_actual_len(ex);
521
522                         if (prev && (prev != lblk))
523                                 ext4_es_cache_extent(inode, prev,
524                                                      lblk - prev, ~0,
525                                                      EXTENT_STATUS_HOLE);
526
527                         if (ext4_ext_is_uninitialized(ex))
528                                 status = EXTENT_STATUS_UNWRITTEN;
529                         ext4_es_cache_extent(inode, lblk, len,
530                                              ext4_ext_pblock(ex), status);
531                         prev = lblk + len;
532                 }
533         }
534         return bh;
535 errout:
536         put_bh(bh);
537         return ERR_PTR(err);
538
539 }
540
541 #define read_extent_tree_block(inode, pblk, depth, flags)               \
542         __read_extent_tree_block(__func__, __LINE__, (inode), (pblk),   \
543                                  (depth), (flags))
544
545 /*
546  * This function is called to cache a file's extent information in the
547  * extent status tree
548  */
549 int ext4_ext_precache(struct inode *inode)
550 {
551         struct ext4_inode_info *ei = EXT4_I(inode);
552         struct ext4_ext_path *path = NULL;
553         struct buffer_head *bh;
554         int i = 0, depth, ret = 0;
555
556         if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
557                 return 0;       /* not an extent-mapped inode */
558
559         down_read(&ei->i_data_sem);
560         depth = ext_depth(inode);
561
562         path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
563                        GFP_NOFS);
564         if (path == NULL) {
565                 up_read(&ei->i_data_sem);
566                 return -ENOMEM;
567         }
568
569         /* Don't cache anything if there are no external extent blocks */
570         if (depth == 0)
571                 goto out;
572         path[0].p_hdr = ext_inode_hdr(inode);
573         ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
574         if (ret)
575                 goto out;
576         path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
577         while (i >= 0) {
578                 /*
579                  * If this is a leaf block or we've reached the end of
580                  * the index block, go up
581                  */
582                 if ((i == depth) ||
583                     path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
584                         brelse(path[i].p_bh);
585                         path[i].p_bh = NULL;
586                         i--;
587                         continue;
588                 }
589                 bh = read_extent_tree_block(inode,
590                                             ext4_idx_pblock(path[i].p_idx++),
591                                             depth - i - 1,
592                                             EXT4_EX_FORCE_CACHE);
593                 if (IS_ERR(bh)) {
594                         ret = PTR_ERR(bh);
595                         break;
596                 }
597                 i++;
598                 path[i].p_bh = bh;
599                 path[i].p_hdr = ext_block_hdr(bh);
600                 path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
601         }
602         ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
603 out:
604         up_read(&ei->i_data_sem);
605         ext4_ext_drop_refs(path);
606         kfree(path);
607         return ret;
608 }
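
/*
 * In mainline kernels of this vintage the function above is reached via
 * the EXT4_IOC_PRECACHE_EXTENTS ioctl, so userspace can, for example,
 * pull a file's whole extent tree into the extent status cache before
 * issuing a burst of random reads.
 */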
609
610 #ifdef EXT_DEBUG
611 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
612 {
613         int k, l = path->p_depth;
614
615         ext_debug("path:");
616         for (k = 0; k <= l; k++, path++) {
617                 if (path->p_idx) {
618                         ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
619                                   ext4_idx_pblock(path->p_idx));
620                 } else if (path->p_ext) {
621                         ext_debug("  %d:[%d]%d:%llu ",
622                                   le32_to_cpu(path->p_ext->ee_block),
623                                   ext4_ext_is_uninitialized(path->p_ext),
624                                   ext4_ext_get_actual_len(path->p_ext),
625                                   ext4_ext_pblock(path->p_ext));
626                 } else
627                         ext_debug("  []");
628         }
629         ext_debug("\n");
630 }
631
632 static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
633 {
634         int depth = ext_depth(inode);
635         struct ext4_extent_header *eh;
636         struct ext4_extent *ex;
637         int i;
638
639         if (!path)
640                 return;
641
642         eh = path[depth].p_hdr;
643         ex = EXT_FIRST_EXTENT(eh);
644
645         ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);
646
647         for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
648                 ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
649                           ext4_ext_is_uninitialized(ex),
650                           ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
651         }
652         ext_debug("\n");
653 }
654
655 static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
656                         ext4_fsblk_t newblock, int level)
657 {
658         int depth = ext_depth(inode);
659         struct ext4_extent *ex;
660
661         if (depth != level) {
662                 struct ext4_extent_idx *idx;
663                 idx = path[level].p_idx;
664                 while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
665                         ext_debug("%d: move %d:%llu in new index %llu\n", level,
666                                         le32_to_cpu(idx->ei_block),
667                                         ext4_idx_pblock(idx),
668                                         newblock);
669                         idx++;
670                 }
671
672                 return;
673         }
674
675         ex = path[depth].p_ext;
676         while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
677                 ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
678                                 le32_to_cpu(ex->ee_block),
679                                 ext4_ext_pblock(ex),
680                                 ext4_ext_is_uninitialized(ex),
681                                 ext4_ext_get_actual_len(ex),
682                                 newblock);
683                 ex++;
684         }
685 }
686
687 #else
688 #define ext4_ext_show_path(inode, path)
689 #define ext4_ext_show_leaf(inode, path)
690 #define ext4_ext_show_move(inode, path, newblock, level)
691 #endif
692
693 void ext4_ext_drop_refs(struct ext4_ext_path *path)
694 {
695         int depth = path->p_depth;
696         int i;
697
698         for (i = 0; i <= depth; i++, path++)
699                 if (path->p_bh) {
700                         brelse(path->p_bh);
701                         path->p_bh = NULL;
702                 }
703 }
704
705 /*
706  * ext4_ext_binsearch_idx:
707  * binary search for the closest index to the given block;
708  * the header must be checked before calling this
709  */
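/*
 * For example (illustrative numbers): with index entries starting at
 * logical blocks {0, 100, 200} and a target block of 150, the search
 * settles on the entry starting at 100, i.e. the last index whose
 * ei_block is <= the target block.  ext4_ext_binsearch() below uses
 * the same scheme for leaf extents.
 */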
710 static void
711 ext4_ext_binsearch_idx(struct inode *inode,
712                         struct ext4_ext_path *path, ext4_lblk_t block)
713 {
714         struct ext4_extent_header *eh = path->p_hdr;
715         struct ext4_extent_idx *r, *l, *m;
716
717
718         ext_debug("binsearch for %u(idx):  ", block);
719
720         l = EXT_FIRST_INDEX(eh) + 1;
721         r = EXT_LAST_INDEX(eh);
722         while (l <= r) {
723                 m = l + (r - l) / 2;
724                 if (block < le32_to_cpu(m->ei_block))
725                         r = m - 1;
726                 else
727                         l = m + 1;
728                 ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
729                                 m, le32_to_cpu(m->ei_block),
730                                 r, le32_to_cpu(r->ei_block));
731         }
732
733         path->p_idx = l - 1;
734         ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
735                   ext4_idx_pblock(path->p_idx));
736
737 #ifdef CHECK_BINSEARCH
738         {
739                 struct ext4_extent_idx *chix, *ix;
740                 int k;
741
742                 chix = ix = EXT_FIRST_INDEX(eh);
743                 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
744                         if (k != 0 &&
745                             le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
746                                 printk(KERN_DEBUG "k=%d, ix=0x%p, "
747                                        "first=0x%p\n", k,
748                                        ix, EXT_FIRST_INDEX(eh));
749                                 printk(KERN_DEBUG "%u <= %u\n",
750                                        le32_to_cpu(ix->ei_block),
751                                        le32_to_cpu(ix[-1].ei_block));
752                         }
753                         BUG_ON(k && le32_to_cpu(ix->ei_block)
754                                            <= le32_to_cpu(ix[-1].ei_block));
755                         if (block < le32_to_cpu(ix->ei_block))
756                                 break;
757                         chix = ix;
758                 }
759                 BUG_ON(chix != path->p_idx);
760         }
761 #endif
762
763 }
764
765 /*
766  * ext4_ext_binsearch:
767  * binary search for the closest extent to the given block;
768  * the header must be checked before calling this
769  */
770 static void
771 ext4_ext_binsearch(struct inode *inode,
772                 struct ext4_ext_path *path, ext4_lblk_t block)
773 {
774         struct ext4_extent_header *eh = path->p_hdr;
775         struct ext4_extent *r, *l, *m;
776
777         if (eh->eh_entries == 0) {
778                 /*
779                  * this leaf is empty:
780                  * we get such a leaf in split/add case
781                  */
782                 return;
783         }
784
785         ext_debug("binsearch for %u:  ", block);
786
787         l = EXT_FIRST_EXTENT(eh) + 1;
788         r = EXT_LAST_EXTENT(eh);
789
790         while (l <= r) {
791                 m = l + (r - l) / 2;
792                 if (block < le32_to_cpu(m->ee_block))
793                         r = m - 1;
794                 else
795                         l = m + 1;
796                 ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
797                                 m, le32_to_cpu(m->ee_block),
798                                 r, le32_to_cpu(r->ee_block));
799         }
800
801         path->p_ext = l - 1;
802         ext_debug("  -> %d:%llu:[%d]%d ",
803                         le32_to_cpu(path->p_ext->ee_block),
804                         ext4_ext_pblock(path->p_ext),
805                         ext4_ext_is_uninitialized(path->p_ext),
806                         ext4_ext_get_actual_len(path->p_ext));
807
808 #ifdef CHECK_BINSEARCH
809         {
810                 struct ext4_extent *chex, *ex;
811                 int k;
812
813                 chex = ex = EXT_FIRST_EXTENT(eh);
814                 for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
815                         BUG_ON(k && le32_to_cpu(ex->ee_block)
816                                           <= le32_to_cpu(ex[-1].ee_block));
817                         if (block < le32_to_cpu(ex->ee_block))
818                                 break;
819                         chex = ex;
820                 }
821                 BUG_ON(chex != path->p_ext);
822         }
823 #endif
824
825 }
826
827 int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
828 {
829         struct ext4_extent_header *eh;
830
831         eh = ext_inode_hdr(inode);
832         eh->eh_depth = 0;
833         eh->eh_entries = 0;
834         eh->eh_magic = EXT4_EXT_MAGIC;
835         eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
836         ext4_mark_inode_dirty(handle, inode);
837         return 0;
838 }
839
840 struct ext4_ext_path *
841 ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
842                      struct ext4_ext_path *path, int flags)
843 {
844         struct ext4_extent_header *eh;
845         struct buffer_head *bh;
846         short int depth, i, ppos = 0, alloc = 0;
847         int ret;
848
849         eh = ext_inode_hdr(inode);
850         depth = ext_depth(inode);
851
852         /* account possible depth increase */
853         if (!path) {
854                 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
855                                 GFP_NOFS);
856                 if (!path)
857                         return ERR_PTR(-ENOMEM);
858                 alloc = 1;
859         }
860         path[0].p_hdr = eh;
861         path[0].p_bh = NULL;
862
863         i = depth;
864         /* walk through the tree */
865         while (i) {
866                 ext_debug("depth %d: num %d, max %d\n",
867                           ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
868
869                 ext4_ext_binsearch_idx(inode, path + ppos, block);
870                 path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
871                 path[ppos].p_depth = i;
872                 path[ppos].p_ext = NULL;
873
874                 bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
875                                             flags);
876                 if (IS_ERR(bh)) {
877                         ret = PTR_ERR(bh);
878                         goto err;
879                 }
880
881                 eh = ext_block_hdr(bh);
882                 ppos++;
883                 if (unlikely(ppos > depth)) {
884                         put_bh(bh);
885                         EXT4_ERROR_INODE(inode,
886                                          "ppos %d > depth %d", ppos, depth);
887                         ret = -EIO;
888                         goto err;
889                 }
890                 path[ppos].p_bh = bh;
891                 path[ppos].p_hdr = eh;
892         }
893
894         path[ppos].p_depth = i;
895         path[ppos].p_ext = NULL;
896         path[ppos].p_idx = NULL;
897
898         /* find extent */
899         ext4_ext_binsearch(inode, path + ppos, block);
900         /* if not an empty leaf */
901         if (path[ppos].p_ext)
902                 path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
903
904         ext4_ext_show_path(inode, path);
905
906         return path;
907
908 err:
909         ext4_ext_drop_refs(path);
910         if (alloc)
911                 kfree(path);
912         return ERR_PTR(ret);
913 }
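
/*
 * A minimal usage sketch for ext4_ext_find_extent() (illustrative; see
 * ext4_ext_create_new_leaf() below for a real caller):
 *
 *	path = ext4_ext_find_extent(inode, lblk, NULL, 0);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	ex = path[ext_depth(inode)].p_ext;	(NULL for an empty leaf)
 *	...
 *	ext4_ext_drop_refs(path);
 *	kfree(path);
 */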
914
915 /*
916  * ext4_ext_insert_index:
917  * insert new index [@logical;@ptr] into the block at @curp;
918  * check where to insert: before @curp or after @curp
919  */
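/*
 * For example (illustrative): if curp->p_idx points at an index for
 * logical block 200 and @logical is 300, the new entry goes after it;
 * any following entries are first shifted one slot right with
 * memmove() to make room.
 */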
920 static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
921                                  struct ext4_ext_path *curp,
922                                  int logical, ext4_fsblk_t ptr)
923 {
924         struct ext4_extent_idx *ix;
925         int len, err;
926
927         err = ext4_ext_get_access(handle, inode, curp);
928         if (err)
929                 return err;
930
931         if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
932                 EXT4_ERROR_INODE(inode,
933                                  "logical %d == ei_block %d!",
934                                  logical, le32_to_cpu(curp->p_idx->ei_block));
935                 return -EIO;
936         }
937
938         if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
939                              >= le16_to_cpu(curp->p_hdr->eh_max))) {
940                 EXT4_ERROR_INODE(inode,
941                                  "eh_entries %d >= eh_max %d!",
942                                  le16_to_cpu(curp->p_hdr->eh_entries),
943                                  le16_to_cpu(curp->p_hdr->eh_max));
944                 return -EIO;
945         }
946
947         if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
948                 /* insert after */
949                 ext_debug("insert new index %d after: %llu\n", logical, ptr);
950                 ix = curp->p_idx + 1;
951         } else {
952                 /* insert before */
953                 ext_debug("insert new index %d before: %llu\n", logical, ptr);
954                 ix = curp->p_idx;
955         }
956
957         len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
958         BUG_ON(len < 0);
959         if (len > 0) {
960                 ext_debug("insert new index %d: "
961                                 "move %d indices from 0x%p to 0x%p\n",
962                                 logical, len, ix, ix + 1);
963                 memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
964         }
965
966         if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
967                 EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
968                 return -EIO;
969         }
970
971         ix->ei_block = cpu_to_le32(logical);
972         ext4_idx_store_pblock(ix, ptr);
973         le16_add_cpu(&curp->p_hdr->eh_entries, 1);
974
975         if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
976                 EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
977                 return -EIO;
978         }
979
980         err = ext4_ext_dirty(handle, inode, curp);
981         ext4_std_error(inode->i_sb, err);
982
983         return err;
984 }
985
986 /*
987  * ext4_ext_split:
988  * inserts new subtree into the path, using free index entry
989  * at depth @at:
990  * - allocates all needed blocks (new leaf and all intermediate index blocks)
991  * - makes decision where to split
992  * - moves remaining extents and index entries (right to the split point)
993  *   into the newly allocated blocks
994  * - initializes subtree
995  */
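/*
 * Sketch of a split at the leaf level (illustrative): extents to the
 * right of the split point move from the full leaf L into a freshly
 * allocated leaf L', and a new index entry for L' is inserted into the
 * parent:
 *
 *	before:  parent [i1]    -> leaf L  [e1 e2 e3 e4]
 *	after:   parent [i1 i2] -> leaf L  [e1 e2]
 *	                           leaf L' [e3 e4]
 */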
996 static int ext4_ext_split(handle_t *handle, struct inode *inode,
997                           unsigned int flags,
998                           struct ext4_ext_path *path,
999                           struct ext4_extent *newext, int at)
1000 {
1001         struct buffer_head *bh = NULL;
1002         int depth = ext_depth(inode);
1003         struct ext4_extent_header *neh;
1004         struct ext4_extent_idx *fidx;
1005         int i = at, k, m, a;
1006         ext4_fsblk_t newblock, oldblock;
1007         __le32 border;
1008         ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
1009         int err = 0;
1010
1011         /* make decision: where to split? */
1012         /* FIXME: now decision is simplest: at current extent */
1013
1014         /* if the current leaf will be split, then we should use
1015          * the border from the split point */
1016         if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
1017                 EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
1018                 return -EIO;
1019         }
1020         if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
1021                 border = path[depth].p_ext[1].ee_block;
1022                 ext_debug("leaf will be split."
1023                                 " next leaf starts at %d\n",
1024                                   le32_to_cpu(border));
1025         } else {
1026                 border = newext->ee_block;
1027                 ext_debug("leaf will be added."
1028                                 " next leaf starts at %d\n",
1029                                 le32_to_cpu(border));
1030         }
1031
1032         /*
1033          * If an error occurs, we stop processing and
1034          * mark the filesystem read-only. The index won't
1035          * be inserted and the tree will stay in a consistent
1036          * state. The next mount will repair the buffers too.
1037          */
1038
1039         /*
1040          * Get an array to track all allocated blocks.
1041          * We need this to handle errors and to free
1042          * the blocks on failure.
1043          */
1044         ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
1045         if (!ablocks)
1046                 return -ENOMEM;
1047
1048         /* allocate all needed blocks */
1049         ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
1050         for (a = 0; a < depth - at; a++) {
1051                 newblock = ext4_ext_new_meta_block(handle, inode, path,
1052                                                    newext, &err, flags);
1053                 if (newblock == 0)
1054                         goto cleanup;
1055                 ablocks[a] = newblock;
1056         }
1057
1058         /* initialize new leaf */
1059         newblock = ablocks[--a];
1060         if (unlikely(newblock == 0)) {
1061                 EXT4_ERROR_INODE(inode, "newblock == 0!");
1062                 err = -EIO;
1063                 goto cleanup;
1064         }
1065         bh = sb_getblk(inode->i_sb, newblock);
1066         if (unlikely(!bh)) {
1067                 err = -ENOMEM;
1068                 goto cleanup;
1069         }
1070         lock_buffer(bh);
1071
1072         err = ext4_journal_get_create_access(handle, bh);
1073         if (err)
1074                 goto cleanup;
1075
1076         neh = ext_block_hdr(bh);
1077         neh->eh_entries = 0;
1078         neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1079         neh->eh_magic = EXT4_EXT_MAGIC;
1080         neh->eh_depth = 0;
1081
1082         /* move remainder of path[depth] to the new leaf */
1083         if (unlikely(path[depth].p_hdr->eh_entries !=
1084                      path[depth].p_hdr->eh_max)) {
1085                 EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
1086                                  path[depth].p_hdr->eh_entries,
1087                                  path[depth].p_hdr->eh_max);
1088                 err = -EIO;
1089                 goto cleanup;
1090         }
1091         /* start copy from next extent */
1092         m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
1093         ext4_ext_show_move(inode, path, newblock, depth);
1094         if (m) {
1095                 struct ext4_extent *ex;
1096                 ex = EXT_FIRST_EXTENT(neh);
1097                 memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
1098                 le16_add_cpu(&neh->eh_entries, m);
1099         }
1100
1101         ext4_extent_block_csum_set(inode, neh);
1102         set_buffer_uptodate(bh);
1103         unlock_buffer(bh);
1104
1105         err = ext4_handle_dirty_metadata(handle, inode, bh);
1106         if (err)
1107                 goto cleanup;
1108         brelse(bh);
1109         bh = NULL;
1110
1111         /* correct old leaf */
1112         if (m) {
1113                 err = ext4_ext_get_access(handle, inode, path + depth);
1114                 if (err)
1115                         goto cleanup;
1116                 le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
1117                 err = ext4_ext_dirty(handle, inode, path + depth);
1118                 if (err)
1119                         goto cleanup;
1120
1121         }
1122
1123         /* create intermediate indexes */
1124         k = depth - at - 1;
1125         if (unlikely(k < 0)) {
1126                 EXT4_ERROR_INODE(inode, "k %d < 0!", k);
1127                 err = -EIO;
1128                 goto cleanup;
1129         }
1130         if (k)
1131                 ext_debug("create %d intermediate indices\n", k);
1132         /* insert new index into current index block */
1133         /* current depth stored in i var */
1134         i = depth - 1;
1135         while (k--) {
1136                 oldblock = newblock;
1137                 newblock = ablocks[--a];
1138                 bh = sb_getblk(inode->i_sb, newblock);
1139                 if (unlikely(!bh)) {
1140                         err = -ENOMEM;
1141                         goto cleanup;
1142                 }
1143                 lock_buffer(bh);
1144
1145                 err = ext4_journal_get_create_access(handle, bh);
1146                 if (err)
1147                         goto cleanup;
1148
1149                 neh = ext_block_hdr(bh);
1150                 neh->eh_entries = cpu_to_le16(1);
1151                 neh->eh_magic = EXT4_EXT_MAGIC;
1152                 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1153                 neh->eh_depth = cpu_to_le16(depth - i);
1154                 fidx = EXT_FIRST_INDEX(neh);
1155                 fidx->ei_block = border;
1156                 ext4_idx_store_pblock(fidx, oldblock);
1157
1158                 ext_debug("int.index at %d (block %llu): %u -> %llu\n",
1159                                 i, newblock, le32_to_cpu(border), oldblock);
1160
1161                 /* move remainder of path[i] to the new index block */
1162                 if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
1163                                         EXT_LAST_INDEX(path[i].p_hdr))) {
1164                         EXT4_ERROR_INODE(inode,
1165                                          "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
1166                                          le32_to_cpu(path[i].p_ext->ee_block));
1167                         err = -EIO;
1168                         goto cleanup;
1169                 }
1170                 /* start copy indexes */
1171                 m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
1172                 ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
1173                                 EXT_MAX_INDEX(path[i].p_hdr));
1174                 ext4_ext_show_move(inode, path, newblock, i);
1175                 if (m) {
1176                         memmove(++fidx, path[i].p_idx,
1177                                 sizeof(struct ext4_extent_idx) * m);
1178                         le16_add_cpu(&neh->eh_entries, m);
1179                 }
1180                 ext4_extent_block_csum_set(inode, neh);
1181                 set_buffer_uptodate(bh);
1182                 unlock_buffer(bh);
1183
1184                 err = ext4_handle_dirty_metadata(handle, inode, bh);
1185                 if (err)
1186                         goto cleanup;
1187                 brelse(bh);
1188                 bh = NULL;
1189
1190                 /* correct old index */
1191                 if (m) {
1192                         err = ext4_ext_get_access(handle, inode, path + i);
1193                         if (err)
1194                                 goto cleanup;
1195                         le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
1196                         err = ext4_ext_dirty(handle, inode, path + i);
1197                         if (err)
1198                                 goto cleanup;
1199                 }
1200
1201                 i--;
1202         }
1203
1204         /* insert new index */
1205         err = ext4_ext_insert_index(handle, inode, path + at,
1206                                     le32_to_cpu(border), newblock);
1207
1208 cleanup:
1209         if (bh) {
1210                 if (buffer_locked(bh))
1211                         unlock_buffer(bh);
1212                 brelse(bh);
1213         }
1214
1215         if (err) {
1216                 /* free all allocated blocks in error case */
1217                 for (i = 0; i < depth; i++) {
1218                         if (!ablocks[i])
1219                                 continue;
1220                         ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
1221                                          EXT4_FREE_BLOCKS_METADATA);
1222                 }
1223         }
1224         kfree(ablocks);
1225
1226         return err;
1227 }
1228
1229 /*
1230  * ext4_ext_grow_indepth:
1231  * implements tree growing procedure:
1232  * - allocates new block
1233  * - moves top-level data (index block or leaf) into the new block
1234  * - initializes new top-level, creating index that points to the
1235  *   just created block
1236  */
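/*
 * For example (illustrative): growing a depth-0 tree moves the root
 * extents out of the inode body into a new block B, leaving a single
 * index in the inode:
 *
 *	before:  inode body: [e1 e2 e3 e4]
 *	after:   inode body: [i1 -> B],  B: [e1 e2 e3 e4]
 */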
1237 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1238                                  unsigned int flags,
1239                                  struct ext4_extent *newext)
1240 {
1241         struct ext4_extent_header *neh;
1242         struct buffer_head *bh;
1243         ext4_fsblk_t newblock;
1244         int err = 0;
1245
1246         newblock = ext4_ext_new_meta_block(handle, inode, NULL,
1247                 newext, &err, flags);
1248         if (newblock == 0)
1249                 return err;
1250
1251         bh = sb_getblk(inode->i_sb, newblock);
1252         if (unlikely(!bh))
1253                 return -ENOMEM;
1254         lock_buffer(bh);
1255
1256         err = ext4_journal_get_create_access(handle, bh);
1257         if (err) {
1258                 unlock_buffer(bh);
1259                 goto out;
1260         }
1261
1262         /* move top-level index/leaf into new block */
1263         memmove(bh->b_data, EXT4_I(inode)->i_data,
1264                 sizeof(EXT4_I(inode)->i_data));
1265
1266         /* set size of new block */
1267         neh = ext_block_hdr(bh);
1268         /* the old root could have indexes or leaves,
1269          * so calculate eh_max the right way */
1270         if (ext_depth(inode))
1271                 neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
1272         else
1273                 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1274         neh->eh_magic = EXT4_EXT_MAGIC;
1275         ext4_extent_block_csum_set(inode, neh);
1276         set_buffer_uptodate(bh);
1277         unlock_buffer(bh);
1278
1279         err = ext4_handle_dirty_metadata(handle, inode, bh);
1280         if (err)
1281                 goto out;
1282
1283         /* Update top-level index: num,max,pointer */
1284         neh = ext_inode_hdr(inode);
1285         neh->eh_entries = cpu_to_le16(1);
1286         ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
1287         if (neh->eh_depth == 0) {
1288                 /* Root extent block becomes index block */
1289                 neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
1290                 EXT_FIRST_INDEX(neh)->ei_block =
1291                         EXT_FIRST_EXTENT(neh)->ee_block;
1292         }
1293         ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
1294                   le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
1295                   le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
1296                   ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
1297
1298         le16_add_cpu(&neh->eh_depth, 1);
1299         ext4_mark_inode_dirty(handle, inode);
1300 out:
1301         brelse(bh);
1302
1303         return err;
1304 }
1305
1306 /*
1307  * ext4_ext_create_new_leaf:
1308  * finds an empty index and adds a new leaf.
1309  * if no free index is found, the tree is grown in depth.
1310  */
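/*
 * For example (illustrative): in a depth-2 tree whose leaf is full but
 * whose level-1 index block still has a free slot, the walk below stops
 * at level 1 and ext4_ext_split() is called there; only when every
 * level is full does the tree grow in depth.
 */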
1311 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1312                                     unsigned int mb_flags,
1313                                     unsigned int gb_flags,
1314                                     struct ext4_ext_path *path,
1315                                     struct ext4_extent *newext)
1316 {
1317         struct ext4_ext_path *curp;
1318         int depth, i, err = 0;
1319
1320 repeat:
1321         i = depth = ext_depth(inode);
1322
1323         /* walk up the tree and look for a free index entry */
1324         curp = path + depth;
1325         while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
1326                 i--;
1327                 curp--;
1328         }
1329
1330         /* we use the already allocated block for the index block,
1331          * so subsequent data blocks should be contiguous */
1332         if (EXT_HAS_FREE_INDEX(curp)) {
1333                 /* if we found index with free entry, then use that
1334                  * entry: create all needed subtree and add new leaf */
1335                 err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
1336                 if (err)
1337                         goto out;
1338
1339                 /* refill path */
1340                 ext4_ext_drop_refs(path);
1341                 path = ext4_ext_find_extent(inode,
1342                                     (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1343                                     path, gb_flags);
1344                 if (IS_ERR(path))
1345                         err = PTR_ERR(path);
1346         } else {
1347                 /* tree is full, time to grow in depth */
1348                 err = ext4_ext_grow_indepth(handle, inode, mb_flags, newext);
1349                 if (err)
1350                         goto out;
1351
1352                 /* refill path */
1353                 ext4_ext_drop_refs(path);
1354                 path = ext4_ext_find_extent(inode,
1355                                    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1356                                     path, gb_flags);
1357                 if (IS_ERR(path)) {
1358                         err = PTR_ERR(path);
1359                         goto out;
1360                 }
1361
1362                 /*
1363          * only the first split (depth 0 -> 1) produces free space;
1364                  * in all other cases we have to split the grown tree
1365                  */
1366                 depth = ext_depth(inode);
1367                 if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
1368                         /* now we need to split */
1369                         goto repeat;
1370                 }
1371         }
1372
1373 out:
1374         return err;
1375 }
1376
1377 /*
1378  * search for the closest allocated block to the left of *logical;
1379  * returns it at @logical and its physical address at @phys.
1380  * if *logical is the smallest allocated block, the function
1381  * returns 0 at @phys.
1382  * the return value contains 0 (success) or an error code
1383  */
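/*
 * For example (illustrative numbers): if the tree maps logical blocks
 * 100..107 to physical blocks 5000..5007 and *logical is 200, this
 * returns *logical = 107 and *phys = 5007, the last block of the
 * closest extent to the left.  ext4_ext_search_right() below is the
 * symmetric counterpart.
 */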
1384 static int ext4_ext_search_left(struct inode *inode,
1385                                 struct ext4_ext_path *path,
1386                                 ext4_lblk_t *logical, ext4_fsblk_t *phys)
1387 {
1388         struct ext4_extent_idx *ix;
1389         struct ext4_extent *ex;
1390         int depth, ee_len;
1391
1392         if (unlikely(path == NULL)) {
1393                 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1394                 return -EIO;
1395         }
1396         depth = path->p_depth;
1397         *phys = 0;
1398
1399         if (depth == 0 && path->p_ext == NULL)
1400                 return 0;
1401
1402         /* usually the extent in the path covers blocks smaller
1403          * than *logical, but it can be that the extent is the
1404          * first one in the file */
1405
1406         ex = path[depth].p_ext;
1407         ee_len = ext4_ext_get_actual_len(ex);
1408         if (*logical < le32_to_cpu(ex->ee_block)) {
1409                 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1410                         EXT4_ERROR_INODE(inode,
1411                                          "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
1412                                          *logical, le32_to_cpu(ex->ee_block));
1413                         return -EIO;
1414                 }
1415                 while (--depth >= 0) {
1416                         ix = path[depth].p_idx;
1417                         if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1418                                 EXT4_ERROR_INODE(inode,
1419                                   "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
1420                                   ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
1421                                   EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
1422                 le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
1423                                   depth);
1424                                 return -EIO;
1425                         }
1426                 }
1427                 return 0;
1428         }
1429
1430         if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1431                 EXT4_ERROR_INODE(inode,
1432                                  "logical %d < ee_block %d + ee_len %d!",
1433                                  *logical, le32_to_cpu(ex->ee_block), ee_len);
1434                 return -EIO;
1435         }
1436
1437         *logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
1438         *phys = ext4_ext_pblock(ex) + ee_len - 1;
1439         return 0;
1440 }
1441
1442 /*
1443  * search for the closest allocated block to the right of *logical;
1444  * returns it at @logical and its physical address at @phys.
1445  * if *logical is the largest allocated block, the function
1446  * returns 0 at @phys.
1447  * the return value contains 0 (success) or an error code
1448  */
1449 static int ext4_ext_search_right(struct inode *inode,
1450                                  struct ext4_ext_path *path,
1451                                  ext4_lblk_t *logical, ext4_fsblk_t *phys,
1452                                  struct ext4_extent **ret_ex)
1453 {
1454         struct buffer_head *bh = NULL;
1455         struct ext4_extent_header *eh;
1456         struct ext4_extent_idx *ix;
1457         struct ext4_extent *ex;
1458         ext4_fsblk_t block;
1459         int depth;      /* Note, NOT eh_depth; depth from top of tree */
1460         int ee_len;
1461
1462         if (unlikely(path == NULL)) {
1463                 EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
1464                 return -EIO;
1465         }
1466         depth = path->p_depth;
1467         *phys = 0;
1468
1469         if (depth == 0 && path->p_ext == NULL)
1470                 return 0;
1471
1472         /* usually the extent in the path covers blocks smaller
1473          * than *logical, but it can be that the extent is the
1474          * first one in the file */
1475
1476         ex = path[depth].p_ext;
1477         ee_len = ext4_ext_get_actual_len(ex);
1478         if (*logical < le32_to_cpu(ex->ee_block)) {
1479                 if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
1480                         EXT4_ERROR_INODE(inode,
1481                                          "first_extent(path[%d].p_hdr) != ex",
1482                                          depth);
1483                         return -EIO;
1484                 }
1485                 while (--depth >= 0) {
1486                         ix = path[depth].p_idx;
1487                         if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
1488                                 EXT4_ERROR_INODE(inode,
1489                                                  "ix != EXT_FIRST_INDEX *logical %d!",
1490                                                  *logical);
1491                                 return -EIO;
1492                         }
1493                 }
1494                 goto found_extent;
1495         }
1496
1497         if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
1498                 EXT4_ERROR_INODE(inode,
1499                                  "logical %d < ee_block %d + ee_len %d!",
1500                                  *logical, le32_to_cpu(ex->ee_block), ee_len);
1501                 return -EIO;
1502         }
1503
1504         if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
1505                 /* next allocated block in this leaf */
1506                 ex++;
1507                 goto found_extent;
1508         }
1509
1510         /* go up and search for index to the right */
1511         while (--depth >= 0) {
1512                 ix = path[depth].p_idx;
1513                 if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
1514                         goto got_index;
1515         }
1516
1517         /* we've gone up to the root and found no index to the right */
1518         return 0;
1519
1520 got_index:
1521         /* we've found index to the right, let's
1522          * follow it and find the closest allocated
1523          * block to the right */
1524         ix++;
1525         block = ext4_idx_pblock(ix);
1526         while (++depth < path->p_depth) {
1527                 /* subtract from p_depth to get proper eh_depth */
1528                 bh = read_extent_tree_block(inode, block,
1529                                             path->p_depth - depth, 0);
1530                 if (IS_ERR(bh))
1531                         return PTR_ERR(bh);
1532                 eh = ext_block_hdr(bh);
1533                 ix = EXT_FIRST_INDEX(eh);
1534                 block = ext4_idx_pblock(ix);
1535                 put_bh(bh);
1536         }
1537
1538         bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
1539         if (IS_ERR(bh))
1540                 return PTR_ERR(bh);
1541         eh = ext_block_hdr(bh);
1542         ex = EXT_FIRST_EXTENT(eh);
1543 found_extent:
1544         *logical = le32_to_cpu(ex->ee_block);
1545         *phys = ext4_ext_pblock(ex);
1546         *ret_ex = ex;
1547         if (bh)
1548                 put_bh(bh);
1549         return 0;
1550 }
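
/*
 * Continuing the illustrative example above, a search to the right of
 * *logical == 30 returns the first block of [50, +8): *logical = 50 and
 * *phys = ext4_ext_pblock(ex), with the extent itself handed back through
 * @ret_ex.  Had the path extent been the last one in its leaf, the
 * function would instead walk up for an index to the right and follow its
 * leftmost branch down, reading extent tree blocks as needed.
 */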
1551
1552 /*
1553  * ext4_ext_next_allocated_block:
1554  * returns the first allocated block in the subsequent extent, or EXT_MAX_BLOCKS.
1555  * NOTE: it considers the block number from an index entry to be an
1556  * allocated block. Thus, index entries have to be consistent
1557  * with the leaves.
1558  */
1559 static ext4_lblk_t
1560 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1561 {
1562         int depth;
1563
1564         BUG_ON(path == NULL);
1565         depth = path->p_depth;
1566
1567         if (depth == 0 && path->p_ext == NULL)
1568                 return EXT_MAX_BLOCKS;
1569
1570         while (depth >= 0) {
1571                 if (depth == path->p_depth) {
1572                         /* leaf */
1573                         if (path[depth].p_ext &&
1574                                 path[depth].p_ext !=
1575                                         EXT_LAST_EXTENT(path[depth].p_hdr))
1576                           return le32_to_cpu(path[depth].p_ext[1].ee_block);
1577                 } else {
1578                         /* index */
1579                         if (path[depth].p_idx !=
1580                                         EXT_LAST_INDEX(path[depth].p_hdr))
1581                           return le32_to_cpu(path[depth].p_idx[1].ei_block);
1582                 }
1583                 depth--;
1584         }
1585
1586         return EXT_MAX_BLOCKS;
1587 }
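
/*
 * Illustration: if path[depth].p_ext points at [50, +8) and the same leaf
 * also holds [70, +4) after it, this returns 70; if the path sits on the
 * last entry at every level, it returns EXT_MAX_BLOCKS.
 */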
1588
1589 /*
1590  * ext4_ext_next_leaf_block:
1591  * returns first allocated block from next leaf or EXT_MAX_BLOCKS
1592  */
1593 static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
1594 {
1595         int depth;
1596
1597         BUG_ON(path == NULL);
1598         depth = path->p_depth;
1599
1600         /* zero-tree has no leaf blocks at all */
1601         if (depth == 0)
1602                 return EXT_MAX_BLOCKS;
1603
1604         /* go to index block */
1605         depth--;
1606
1607         while (depth >= 0) {
1608                 if (path[depth].p_idx !=
1609                                 EXT_LAST_INDEX(path[depth].p_hdr))
1610                         return (ext4_lblk_t)
1611                                 le32_to_cpu(path[depth].p_idx[1].ei_block);
1612                 depth--;
1613         }
1614
1615         return EXT_MAX_BLOCKS;
1616 }
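
/*
 * Unlike ext4_ext_next_allocated_block(), this helper skips the leaf
 * level entirely and walks only the index levels, so it reports the
 * logical start of the next leaf even when further extents remain in
 * the current one.
 */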
1617
1618 /*
1619  * ext4_ext_correct_indexes:
1620  * if the leaf gets modified and the modified extent is first in the leaf,
1621  * then we have to correct all indexes above.
1622  * TODO: do we need to correct the tree in all cases?
1623  */
1624 static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
1625                                 struct ext4_ext_path *path)
1626 {
1627         struct ext4_extent_header *eh;
1628         int depth = ext_depth(inode);
1629         struct ext4_extent *ex;
1630         __le32 border;
1631         int k, err = 0;
1632
1633         eh = path[depth].p_hdr;
1634         ex = path[depth].p_ext;
1635
1636         if (unlikely(ex == NULL || eh == NULL)) {
1637                 EXT4_ERROR_INODE(inode,
1638                                  "ex %p == NULL or eh %p == NULL", ex, eh);
1639                 return -EIO;
1640         }
1641
1642         if (depth == 0) {
1643                 /* there is no tree at all */
1644                 return 0;
1645         }
1646
1647         if (ex != EXT_FIRST_EXTENT(eh)) {
1648                 /* we only correct the tree if the first extent was modified */
1649                 return 0;
1650         }
1651
1652         /*
1653          * TODO: we need correction if border is smaller than current one
1654          */
1655         k = depth - 1;
1656         border = path[depth].p_ext->ee_block;
1657         err = ext4_ext_get_access(handle, inode, path + k);
1658         if (err)
1659                 return err;
1660         path[k].p_idx->ei_block = border;
1661         err = ext4_ext_dirty(handle, inode, path + k);
1662         if (err)
1663                 return err;
1664
1665         while (k--) {
1666                 /* change all left-side indexes */
1667                 if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
1668                         break;
1669                 err = ext4_ext_get_access(handle, inode, path + k);
1670                 if (err)
1671                         break;
1672                 path[k].p_idx->ei_block = border;
1673                 err = ext4_ext_dirty(handle, inode, path + k);
1674                 if (err)
1675                         break;
1676         }
1677
1678         return err;
1679 }
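
/*
 * Illustration: if the first extent of a leaf is changed to start at
 * block 96 instead of 100, the index entry pointing at this leaf is
 * updated to 96, and the same correction is repeated one level up for
 * as long as the updated index entry is itself the first in its node.
 */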
1680
1681 int
1682 ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
1683                                 struct ext4_extent *ex2)
1684 {
1685         unsigned short ext1_ee_len, ext2_ee_len;
1686
1687         /*
1688          * Make sure that both extents are initialized. We don't merge
1689          * uninitialized extents so that we can be sure that end_io code has
1690          * the extent that was written properly split out and conversion to
1691          * initialized is trivial.
1692          */
1693         if (ext4_ext_is_uninitialized(ex1) != ext4_ext_is_uninitialized(ex2))
1694                 return 0;
1695
1696         ext1_ee_len = ext4_ext_get_actual_len(ex1);
1697         ext2_ee_len = ext4_ext_get_actual_len(ex2);
1698
1699         if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
1700                         le32_to_cpu(ex2->ee_block))
1701                 return 0;
1702
1703         /*
1704          * To allow future support for preallocated extents to be added
1705          * as an RO_COMPAT feature, refuse to merge two extents if
1706          * this can result in the top bit of ee_len being set.
1707          */
1708         if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
1709                 return 0;
1710         if (ext4_ext_is_uninitialized(ex1) &&
1711             (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
1712              atomic_read(&EXT4_I(inode)->i_unwritten) ||
1713              (ext1_ee_len + ext2_ee_len > EXT_UNINIT_MAX_LEN)))
1714                 return 0;
1715 #ifdef AGGRESSIVE_TEST
1716         if (ext1_ee_len >= 4)
1717                 return 0;
1718 #endif
1719
1720         if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
1721                 return 1;
1722         return 0;
1723 }
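
/*
 * The length check above keeps the top bit of ee_len free: that bit marks
 * an extent as uninitialized, so an initialized extent may cover at most
 * EXT_INIT_MAX_LEN (32768) blocks and an uninitialized one at most
 * EXT_UNINIT_MAX_LEN (32767).  Merging, say, a 20000-block extent with a
 * 15000-block neighbour is therefore refused.
 */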
1724
1725 /*
1726  * This function tries to merge the "ex" extent to the next extent in the tree.
1727  * It always tries to merge towards the right. If you want to merge towards
1728  * the left, pass "ex - 1" as the argument instead of "ex".
1729  * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
1730  * 1 if they got merged.
1731  */
1732 static int ext4_ext_try_to_merge_right(struct inode *inode,
1733                                  struct ext4_ext_path *path,
1734                                  struct ext4_extent *ex)
1735 {
1736         struct ext4_extent_header *eh;
1737         unsigned int depth, len;
1738         int merge_done = 0, uninit;
1739
1740         depth = ext_depth(inode);
1741         BUG_ON(path[depth].p_hdr == NULL);
1742         eh = path[depth].p_hdr;
1743
1744         while (ex < EXT_LAST_EXTENT(eh)) {
1745                 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
1746                         break;
1747                 /* merge with next extent! */
1748                 uninit = ext4_ext_is_uninitialized(ex);
1749                 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1750                                 + ext4_ext_get_actual_len(ex + 1));
1751                 if (uninit)
1752                         ext4_ext_mark_uninitialized(ex);
1753
1754                 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
1755                         len = (EXT_LAST_EXTENT(eh) - ex - 1)
1756                                 * sizeof(struct ext4_extent);
1757                         memmove(ex + 1, ex + 2, len);
1758                 }
1759                 le16_add_cpu(&eh->eh_entries, -1);
1760                 merge_done = 1;
1761                 WARN_ON(eh->eh_entries == 0);
1762                 if (!eh->eh_entries)
1763                         EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
1764         }
1765
1766         return merge_done;
1767 }
1768
1769 /*
1770  * This function does a very simple check to see if we can collapse
1771  * an extent tree with a single extent tree leaf block into the inode.
1772  */
1773 static void ext4_ext_try_to_merge_up(handle_t *handle,
1774                                      struct inode *inode,
1775                                      struct ext4_ext_path *path)
1776 {
1777         size_t s;
1778         unsigned max_root = ext4_ext_space_root(inode, 0);
1779         ext4_fsblk_t blk;
1780
1781         if ((path[0].p_depth != 1) ||
1782             (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
1783             (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
1784                 return;
1785
1786         /*
1787          * We need to modify the block allocation bitmap and the block
1788          * group descriptor to release the extent tree block.  If we
1789          * can't get the journal credits, give up.
1790          */
1791         if (ext4_journal_extend(handle, 2))
1792                 return;
1793
1794         /*
1795          * Copy the extent data up to the inode
1796          */
1797         blk = ext4_idx_pblock(path[0].p_idx);
1798         s = le16_to_cpu(path[1].p_hdr->eh_entries) *
1799                 sizeof(struct ext4_extent_idx);
1800         s += sizeof(struct ext4_extent_header);
1801
1802         memcpy(path[0].p_hdr, path[1].p_hdr, s);
1803         path[0].p_depth = 0;
1804         path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
1805                 (path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
1806         path[0].p_hdr->eh_max = cpu_to_le16(max_root);
1807
1808         brelse(path[1].p_bh);
1809         ext4_free_blocks(handle, inode, NULL, blk, 1,
1810                          EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET |
1811                          EXT4_FREE_BLOCKS_RESERVE);
1812 }
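
/*
 * In other words: a depth-1 tree whose root holds a single index entry
 * and whose only leaf fits within the inode's i_data root is flattened
 * here.  The leaf contents are copied over the root, the depth drops to
 * zero, and the now-unused leaf block is freed; the two extra journal
 * credits requested above cover the bitmap and group descriptor updates.
 */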
1813
1814 /*
1815  * This function tries to merge the @ex extent with its neighbours in the
1816  * tree and then tries to collapse the whole tree into the inode.
1817  */
1818 static void ext4_ext_try_to_merge(handle_t *handle,
1819                                   struct inode *inode,
1820                                   struct ext4_ext_path *path,
1821                                   struct ext4_extent *ex) {
1822         struct ext4_extent_header *eh;
1823         unsigned int depth;
1824         int merge_done = 0;
1825
1826         depth = ext_depth(inode);
1827         BUG_ON(path[depth].p_hdr == NULL);
1828         eh = path[depth].p_hdr;
1829
1830         if (ex > EXT_FIRST_EXTENT(eh))
1831                 merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
1832
1833         if (!merge_done)
1834                 (void) ext4_ext_try_to_merge_right(inode, path, ex);
1835
1836         ext4_ext_try_to_merge_up(handle, inode, path);
1837 }
1838
1839 /*
1840  * check if a portion of the "newext" extent overlaps with an
1841  * existing extent.
1842  *
1843  * If an overlap is discovered, it updates the length of @newext
1844  * such that there will be no overlap, and then returns 1.
1845  * If no overlap is found, it returns 0.
1846  */
1847 static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
1848                                            struct inode *inode,
1849                                            struct ext4_extent *newext,
1850                                            struct ext4_ext_path *path)
1851 {
1852         ext4_lblk_t b1, b2;
1853         unsigned int depth, len1;
1854         unsigned int ret = 0;
1855
1856         b1 = le32_to_cpu(newext->ee_block);
1857         len1 = ext4_ext_get_actual_len(newext);
1858         depth = ext_depth(inode);
1859         if (!path[depth].p_ext)
1860                 goto out;
1861         b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
1862
1863         /*
1864          * get the next allocated block if the extent in the path
1865          * is before the requested block(s)
1866          */
1867         if (b2 < b1) {
1868                 b2 = ext4_ext_next_allocated_block(path);
1869                 if (b2 == EXT_MAX_BLOCKS)
1870                         goto out;
1871                 b2 = EXT4_LBLK_CMASK(sbi, b2);
1872         }
1873
1874         /* check for wrap through zero on extent logical start block */
1875         if (b1 + len1 < b1) {
1876                 len1 = EXT_MAX_BLOCKS - b1;
1877                 newext->ee_len = cpu_to_le16(len1);
1878                 ret = 1;
1879         }
1880
1881         /* check for overlap */
1882         if (b1 + len1 > b2) {
1883                 newext->ee_len = cpu_to_le16(b2 - b1);
1884                 ret = 1;
1885         }
1886 out:
1887         return ret;
1888 }
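
/*
 * Illustration (assuming cluster size == block size, which makes
 * EXT4_LBLK_CMASK a no-op): inserting newext [100, +50) while an extent
 * already starts at block 120 trims newext to [100, +20) and returns 1.
 */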
1889
1890 /*
1891  * ext4_ext_insert_extent:
1892  * tries to merge the requested extent into an existing extent or
1893  * inserts the requested extent as a new one into the tree,
1894  * creating a new leaf in the no-space case.
1895  */
1896 int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1897                                 struct ext4_ext_path *path,
1898                                 struct ext4_extent *newext, int gb_flags)
1899 {
1900         struct ext4_extent_header *eh;
1901         struct ext4_extent *ex, *fex;
1902         struct ext4_extent *nearex; /* nearest extent */
1903         struct ext4_ext_path *npath = NULL;
1904         int depth, len, err;
1905         ext4_lblk_t next;
1906         int mb_flags = 0, uninit;
1907
1908         if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1909                 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1910                 return -EIO;
1911         }
1912         depth = ext_depth(inode);
1913         ex = path[depth].p_ext;
1914         eh = path[depth].p_hdr;
1915         if (unlikely(path[depth].p_hdr == NULL)) {
1916                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
1917                 return -EIO;
1918         }
1919
1920         /* try to insert block into found extent and return */
1921         if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
1922
1923                 /*
1924                  * Try to see whether we should rather test the extent to
1925                  * the right of ex, or the one to its left. This is because
1926                  * ext4_ext_find_extent() can return either the extent on the
1927                  * left or the one on the right of the searched position;
1928                  * checking both makes merging more effective.
1929                  */
1930                 if (ex < EXT_LAST_EXTENT(eh) &&
1931                     (le32_to_cpu(ex->ee_block) +
1932                     ext4_ext_get_actual_len(ex) <
1933                     le32_to_cpu(newext->ee_block))) {
1934                         ex += 1;
1935                         goto prepend;
1936                 } else if ((ex > EXT_FIRST_EXTENT(eh)) &&
1937                            (le32_to_cpu(newext->ee_block) +
1938                            ext4_ext_get_actual_len(newext) <
1939                            le32_to_cpu(ex->ee_block)))
1940                         ex -= 1;
1941
1942                 /* Try to append newex to the ex */
1943                 if (ext4_can_extents_be_merged(inode, ex, newext)) {
1944                         ext_debug("append [%d]%d block to %u:[%d]%d"
1945                                   "(from %llu)\n",
1946                                   ext4_ext_is_uninitialized(newext),
1947                                   ext4_ext_get_actual_len(newext),
1948                                   le32_to_cpu(ex->ee_block),
1949                                   ext4_ext_is_uninitialized(ex),
1950                                   ext4_ext_get_actual_len(ex),
1951                                   ext4_ext_pblock(ex));
1952                         err = ext4_ext_get_access(handle, inode,
1953                                                   path + depth);
1954                         if (err)
1955                                 return err;
1956                         uninit = ext4_ext_is_uninitialized(ex);
1957                         ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1958                                         + ext4_ext_get_actual_len(newext));
1959                         if (uninit)
1960                                 ext4_ext_mark_uninitialized(ex);
1961                         eh = path[depth].p_hdr;
1962                         nearex = ex;
1963                         goto merge;
1964                 }
1965
1966 prepend:
1967                 /* Try to prepend newex to the ex */
1968                 if (ext4_can_extents_be_merged(inode, newext, ex)) {
1969                         ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
1970                                   "(from %llu)\n",
1971                                   le32_to_cpu(newext->ee_block),
1972                                   ext4_ext_is_uninitialized(newext),
1973                                   ext4_ext_get_actual_len(newext),
1974                                   le32_to_cpu(ex->ee_block),
1975                                   ext4_ext_is_uninitialized(ex),
1976                                   ext4_ext_get_actual_len(ex),
1977                                   ext4_ext_pblock(ex));
1978                         err = ext4_ext_get_access(handle, inode,
1979                                                   path + depth);
1980                         if (err)
1981                                 return err;
1982
1983                         uninit = ext4_ext_is_uninitialized(ex);
1984                         ex->ee_block = newext->ee_block;
1985                         ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
1986                         ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
1987                                         + ext4_ext_get_actual_len(newext));
1988                         if (uninit)
1989                                 ext4_ext_mark_uninitialized(ex);
1990                         eh = path[depth].p_hdr;
1991                         nearex = ex;
1992                         goto merge;
1993                 }
1994         }
1995
1996         depth = ext_depth(inode);
1997         eh = path[depth].p_hdr;
1998         if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
1999                 goto has_space;
2000
2001         /* probably next leaf has space for us? */
2002         fex = EXT_LAST_EXTENT(eh);
2003         next = EXT_MAX_BLOCKS;
2004         if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
2005                 next = ext4_ext_next_leaf_block(path);
2006         if (next != EXT_MAX_BLOCKS) {
2007                 ext_debug("next leaf block - %u\n", next);
2008                 BUG_ON(npath != NULL);
2009                 npath = ext4_ext_find_extent(inode, next, NULL, 0);
2010                 if (IS_ERR(npath))
2011                         return PTR_ERR(npath);
2012                 BUG_ON(npath->p_depth != path->p_depth);
2013                 eh = npath[depth].p_hdr;
2014                 if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
2015                         ext_debug("next leaf isn't full(%d)\n",
2016                                   le16_to_cpu(eh->eh_entries));
2017                         path = npath;
2018                         goto has_space;
2019                 }
2020                 ext_debug("next leaf has no free space(%d,%d)\n",
2021                           le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
2022         }
2023
2024         /*
2025          * There is no free space in the found leaf.
2026          * We're gonna add a new leaf in the tree.
2027          */
2028         if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
2029                 mb_flags = EXT4_MB_USE_RESERVED;
2030         err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
2031                                        path, newext);
2032         if (err)
2033                 goto cleanup;
2034         depth = ext_depth(inode);
2035         eh = path[depth].p_hdr;
2036
2037 has_space:
2038         nearex = path[depth].p_ext;
2039
2040         err = ext4_ext_get_access(handle, inode, path + depth);
2041         if (err)
2042                 goto cleanup;
2043
2044         if (!nearex) {
2045                 /* there is no extent in this leaf, create first one */
2046                 ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
2047                                 le32_to_cpu(newext->ee_block),
2048                                 ext4_ext_pblock(newext),
2049                                 ext4_ext_is_uninitialized(newext),
2050                                 ext4_ext_get_actual_len(newext));
2051                 nearex = EXT_FIRST_EXTENT(eh);
2052         } else {
2053                 if (le32_to_cpu(newext->ee_block)
2054                            > le32_to_cpu(nearex->ee_block)) {
2055                         /* Insert after */
2056                         ext_debug("insert %u:%llu:[%d]%d after: "
2057                                         "nearest %p\n",
2058                                         le32_to_cpu(newext->ee_block),
2059                                         ext4_ext_pblock(newext),
2060                                         ext4_ext_is_uninitialized(newext),
2061                                         ext4_ext_get_actual_len(newext),
2062                                         nearex);
2063                         nearex++;
2064                 } else {
2065                         /* Insert before */
2066                         BUG_ON(newext->ee_block == nearex->ee_block);
2067                         ext_debug("insert %u:%llu:[%d]%d before: "
2068                                         "nearest %p\n",
2069                                         le32_to_cpu(newext->ee_block),
2070                                         ext4_ext_pblock(newext),
2071                                         ext4_ext_is_uninitialized(newext),
2072                                         ext4_ext_get_actual_len(newext),
2073                                         nearex);
2074                 }
2075                 len = EXT_LAST_EXTENT(eh) - nearex + 1;
2076                 if (len > 0) {
2077                         ext_debug("insert %u:%llu:[%d]%d: "
2078                                         "move %d extents from 0x%p to 0x%p\n",
2079                                         le32_to_cpu(newext->ee_block),
2080                                         ext4_ext_pblock(newext),
2081                                         ext4_ext_is_uninitialized(newext),
2082                                         ext4_ext_get_actual_len(newext),
2083                                         len, nearex, nearex + 1);
2084                         memmove(nearex + 1, nearex,
2085                                 len * sizeof(struct ext4_extent));
2086                 }
2087         }
2088
2089         le16_add_cpu(&eh->eh_entries, 1);
2090         path[depth].p_ext = nearex;
2091         nearex->ee_block = newext->ee_block;
2092         ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
2093         nearex->ee_len = newext->ee_len;
2094
2095 merge:
2096         /* try to merge extents */
2097         if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
2098                 ext4_ext_try_to_merge(handle, inode, path, nearex);
2099
2100
2101         /* time to correct all indexes above */
2102         err = ext4_ext_correct_indexes(handle, inode, path);
2103         if (err)
2104                 goto cleanup;
2105
2106         err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2107
2108 cleanup:
2109         if (npath) {
2110                 ext4_ext_drop_refs(npath);
2111                 kfree(npath);
2112         }
2113         return err;
2114 }
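
/*
 * The fast paths above try appending or prepending to an existing extent
 * first; only when no suitable leaf has free space does the function pay
 * for ext4_ext_create_new_leaf(), which may split the tree or grow it by
 * one level.
 */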
2115
2116 static int ext4_fill_fiemap_extents(struct inode *inode,
2117                                     ext4_lblk_t block, ext4_lblk_t num,
2118                                     struct fiemap_extent_info *fieinfo)
2119 {
2120         struct ext4_ext_path *path = NULL;
2121         struct ext4_extent *ex;
2122         struct extent_status es;
2123         ext4_lblk_t next, next_del, start = 0, end = 0;
2124         ext4_lblk_t last = block + num;
2125         int exists, depth = 0, err = 0;
2126         unsigned int flags = 0;
2127         unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
2128
2129         while (block < last && block != EXT_MAX_BLOCKS) {
2130                 num = last - block;
2131                 /* find extent for this block */
2132                 down_read(&EXT4_I(inode)->i_data_sem);
2133
2134                 if (path && ext_depth(inode) != depth) {
2135                         /* depth was changed. we have to realloc path */
2136                         kfree(path);
2137                         path = NULL;
2138                 }
2139
2140                 path = ext4_ext_find_extent(inode, block, path, 0);
2141                 if (IS_ERR(path)) {
2142                         up_read(&EXT4_I(inode)->i_data_sem);
2143                         err = PTR_ERR(path);
2144                         path = NULL;
2145                         break;
2146                 }
2147
2148                 depth = ext_depth(inode);
2149                 if (unlikely(path[depth].p_hdr == NULL)) {
2150                         up_read(&EXT4_I(inode)->i_data_sem);
2151                         EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2152                         err = -EIO;
2153                         break;
2154                 }
2155                 ex = path[depth].p_ext;
2156                 next = ext4_ext_next_allocated_block(path);
2157                 ext4_ext_drop_refs(path);
2158
2159                 flags = 0;
2160                 exists = 0;
2161                 if (!ex) {
2162                         /* there is no extent yet, so try to allocate
2163                          * all requested space */
2164                         start = block;
2165                         end = block + num;
2166                 } else if (le32_to_cpu(ex->ee_block) > block) {
2167                         /* need to allocate space before found extent */
2168                         start = block;
2169                         end = le32_to_cpu(ex->ee_block);
2170                         if (block + num < end)
2171                                 end = block + num;
2172                 } else if (block >= le32_to_cpu(ex->ee_block)
2173                                         + ext4_ext_get_actual_len(ex)) {
2174                         /* need to allocate space after found extent */
2175                         start = block;
2176                         end = block + num;
2177                         if (end >= next)
2178                                 end = next;
2179                 } else if (block >= le32_to_cpu(ex->ee_block)) {
2180                         /*
2181                          * some part of requested space is covered
2182                          * by found extent
2183                          */
2184                         start = block;
2185                         end = le32_to_cpu(ex->ee_block)
2186                                 + ext4_ext_get_actual_len(ex);
2187                         if (block + num < end)
2188                                 end = block + num;
2189                         exists = 1;
2190                 } else {
2191                         BUG();
2192                 }
2193                 BUG_ON(end <= start);
2194
2195                 if (!exists) {
2196                         es.es_lblk = start;
2197                         es.es_len = end - start;
2198                         es.es_pblk = 0;
2199                 } else {
2200                         es.es_lblk = le32_to_cpu(ex->ee_block);
2201                         es.es_len = ext4_ext_get_actual_len(ex);
2202                         es.es_pblk = ext4_ext_pblock(ex);
2203                         if (ext4_ext_is_uninitialized(ex))
2204                                 flags |= FIEMAP_EXTENT_UNWRITTEN;
2205                 }
2206
2207                 /*
2208                  * Find delayed extent and update es accordingly. We call
2209                  * it even in !exists case to find out whether es is the
2210                  * last existing extent or not.
2211                  */
2212                 next_del = ext4_find_delayed_extent(inode, &es);
2213                 if (!exists && next_del) {
2214                         exists = 1;
2215                         flags |= (FIEMAP_EXTENT_DELALLOC |
2216                                   FIEMAP_EXTENT_UNKNOWN);
2217                 }
2218                 up_read(&EXT4_I(inode)->i_data_sem);
2219
2220                 if (unlikely(es.es_len == 0)) {
2221                         EXT4_ERROR_INODE(inode, "es.es_len == 0");
2222                         err = -EIO;
2223                         break;
2224                 }
2225
2226                 /*
2227                  * This is possible iff next == next_del == EXT_MAX_BLOCKS.
2228                  * We need to check next == EXT_MAX_BLOCKS because an
2229                  * extent can carry both unwritten and delayed status:
2230                  * when a delayed-allocated range is later allocated by
2231                  * fallocate, the extent status tree tracks both states
2232                  * in a single extent.
2233                  *
2234                  * So we could return an unwritten and delayed extent
2235                  * whose block is equal to 'next'.
2236                  */
2237                 if (next == next_del && next == EXT_MAX_BLOCKS) {
2238                         flags |= FIEMAP_EXTENT_LAST;
2239                         if (unlikely(next_del != EXT_MAX_BLOCKS ||
2240                                      next != EXT_MAX_BLOCKS)) {
2241                                 EXT4_ERROR_INODE(inode,
2242                                                  "next extent == %u, next "
2243                                                  "delalloc extent = %u",
2244                                                  next, next_del);
2245                                 err = -EIO;
2246                                 break;
2247                         }
2248                 }
2249
2250                 if (exists) {
2251                         err = fiemap_fill_next_extent(fieinfo,
2252                                 (__u64)es.es_lblk << blksize_bits,
2253                                 (__u64)es.es_pblk << blksize_bits,
2254                                 (__u64)es.es_len << blksize_bits,
2255                                 flags);
2256                         if (err < 0)
2257                                 break;
2258                         if (err == 1) {
2259                                 err = 0;
2260                                 break;
2261                         }
2262                 }
2263
2264                 block = es.es_lblk + es.es_len;
2265         }
2266
2267         if (path) {
2268                 ext4_ext_drop_refs(path);
2269                 kfree(path);
2270         }
2271
2272         return err;
2273 }
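
/*
 * Note that each loop iteration above reports at most one extent (real
 * or hole) and then advances @block past it.  i_data_sem is held only
 * while the extent tree and the extent status tree are queried, never
 * across fiemap_fill_next_extent(), which may write to user space.
 */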
2274
2275 /*
2276  * ext4_ext_put_gap_in_cache:
2277  * calculate boundaries of the gap that the requested block fits into
2278  * and cache this gap
2279  */
2280 static void
2281 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2282                                 ext4_lblk_t block)
2283 {
2284         int depth = ext_depth(inode);
2285         unsigned long len = 0;
2286         ext4_lblk_t lblock = 0;
2287         struct ext4_extent *ex;
2288
2289         ex = path[depth].p_ext;
2290         if (ex == NULL) {
2291                 /*
2292                  * there is no extent yet, so gap is [0;-] and we
2293                  * don't cache it
2294                  */
2295                 ext_debug("cache gap(whole file):");
2296         } else if (block < le32_to_cpu(ex->ee_block)) {
2297                 lblock = block;
2298                 len = le32_to_cpu(ex->ee_block) - block;
2299                 ext_debug("cache gap(before): %u [%u:%u]",
2300                                 block,
2301                                 le32_to_cpu(ex->ee_block),
2302                                  ext4_ext_get_actual_len(ex));
2303                 if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
2304                         ext4_es_insert_extent(inode, lblock, len, ~0,
2305                                               EXTENT_STATUS_HOLE);
2306         } else if (block >= le32_to_cpu(ex->ee_block)
2307                         + ext4_ext_get_actual_len(ex)) {
2308                 ext4_lblk_t next;
2309                 lblock = le32_to_cpu(ex->ee_block)
2310                         + ext4_ext_get_actual_len(ex);
2311
2312                 next = ext4_ext_next_allocated_block(path);
2313                 ext_debug("cache gap(after): [%u:%u] %u",
2314                                 le32_to_cpu(ex->ee_block),
2315                                 ext4_ext_get_actual_len(ex),
2316                                 block);
2317                 BUG_ON(next == lblock);
2318                 len = next - lblock;
2319                 if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
2320                         ext4_es_insert_extent(inode, lblock, len, ~0,
2321                                               EXTENT_STATUS_HOLE);
2322         } else {
2323                 BUG();
2324         }
2325
2326         ext_debug(" -> %u:%lu\n", lblock, len);
2327 }
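
/*
 * Illustration: with extents [0, +10) and [30, +5), a lookup of block 15
 * takes the "gap after" branch: lblock = 10, next = 30, and the hole
 * [10, +20) is inserted into the extent status tree unless part of it is
 * delayed-allocated.
 */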
2328
2329 /*
2330  * ext4_ext_rm_idx:
2331  * removes index from the index block.
2332  */
2333 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2334                         struct ext4_ext_path *path, int depth)
2335 {
2336         int err;
2337         ext4_fsblk_t leaf;
2338
2339         /* free index block */
2340         depth--;
2341         path = path + depth;
2342         leaf = ext4_idx_pblock(path->p_idx);
2343         if (unlikely(path->p_hdr->eh_entries == 0)) {
2344                 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2345                 return -EIO;
2346         }
2347         err = ext4_ext_get_access(handle, inode, path);
2348         if (err)
2349                 return err;
2350
2351         if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2352                 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2353                 len *= sizeof(struct ext4_extent_idx);
2354                 memmove(path->p_idx, path->p_idx + 1, len);
2355         }
2356
2357         le16_add_cpu(&path->p_hdr->eh_entries, -1);
2358         err = ext4_ext_dirty(handle, inode, path);
2359         if (err)
2360                 return err;
2361         ext_debug("index is empty, remove it, free block %llu\n", leaf);
2362         trace_ext4_ext_rm_idx(inode, leaf);
2363
2364         ext4_free_blocks(handle, inode, NULL, leaf, 1,
2365                          EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2366
2367         while (--depth >= 0) {
2368                 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2369                         break;
2370                 path--;
2371                 err = ext4_ext_get_access(handle, inode, path);
2372                 if (err)
2373                         break;
2374                 path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2375                 err = ext4_ext_dirty(handle, inode, path);
2376                 if (err)
2377                         break;
2378         }
2379         return err;
2380 }
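
/*
 * After the index entry is removed and the leaf block freed, the loop
 * above walks back towards the root: while the modified entry is the
 * first one in its node, its new first key is propagated into the parent
 * index so that the higher levels stay consistent with the leaves.
 */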
2381
2382 /*
2383  * ext4_ext_calc_credits_for_single_extent:
2384  * This routine returns the max. credits needed to insert an extent
2385  * into the extent tree.
2386  * When passing the actual path, the caller should calculate credits
2387  * under i_data_sem.
2388  */
2389 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2390                                                 struct ext4_ext_path *path)
2391 {
2392         if (path) {
2393                 int depth = ext_depth(inode);
2394                 int ret = 0;
2395
2396                 /* probably there is space in leaf? */
2397                 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2398                                 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2399
2400                         /*
2401                          *  There is some space in the leaf, no
2402                          *  need to account for the leaf block credit.
2403                          *
2404                          *  Bitmaps and block group descriptor blocks
2405                          *  and other metadata blocks still need to be
2406                          *  accounted for.
2407                          */
2408                         /* 1 bitmap, 1 block group descriptor */
2409                         ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2410                         return ret;
2411                 }
2412         }
2413
2414         return ext4_chunk_trans_blocks(inode, nrblocks);
2415 }
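
/*
 * That is: if the leaf still has a free slot, only the leaf plus the
 * allocation metadata need journalling, hence 2 credits (bitmap + group
 * descriptor) plus EXT4_META_TRANS_BLOCKS(sb); otherwise the cost of a
 * possible full tree split is taken from ext4_chunk_trans_blocks().
 */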
2416
2417 /*
2418  * How many index/leaf blocks need to change/allocate to add @extents extents?
2419  *
2420  * If we add a single extent, then in the worst case each tree level's
2421  * index/leaf needs to be changed in case of a tree split.
2422  *
2423  * If more extents are inserted, they could cause the whole tree to split
2424  * more than once, but this is really rare.
2425  */
2426 int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
2427 {
2428         int index;
2429         int depth;
2430
2431         /* When converting inline data, only one block is needed here. */
2432         if (ext4_has_inline_data(inode))
2433                 return 1;
2434
2435         depth = ext_depth(inode);
2436
2437         if (extents <= 1)
2438                 index = depth * 2;
2439         else
2440                 index = depth * 3;
2441
2442         return index;
2443 }
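
/*
 * Illustration: for a depth-2 tree, inserting a single extent is
 * budgeted at 2 blocks per level (index = 4), while inserting several
 * extents is budgeted at 3 per level (index = 6) to allow for repeated
 * splits.
 */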
2444
2445 static inline int get_default_free_blocks_flags(struct inode *inode)
2446 {
2447         if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2448                 return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2449         else if (ext4_should_journal_data(inode))
2450                 return EXT4_FREE_BLOCKS_FORGET;
2451         return 0;
2452 }
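
/*
 * Directory and symlink blocks count as metadata, so they are freed with
 * EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET; data blocks of a
 * data-journalled file still need EXT4_FREE_BLOCKS_FORGET so that stale
 * copies in the journal are revoked before the blocks are reused.
 */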
2453
2454 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2455                               struct ext4_extent *ex,
2456                               long long *partial_cluster,
2457                               ext4_lblk_t from, ext4_lblk_t to)
2458 {
2459         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2460         unsigned short ee_len =  ext4_ext_get_actual_len(ex);
2461         ext4_fsblk_t pblk;
2462         int flags = get_default_free_blocks_flags(inode);
2463
2464         /*
2465          * For bigalloc file systems, we never free a partial cluster
2466          * at the beginning of the extent.  Instead, we make a note
2467          * that we tried freeing the cluster, and check to see if we
2468          * need to free it on a subsequent call to ext4_remove_blocks,
2469          * or at the end of the ext4_truncate() operation.
2470          */
2471         flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2472
2473         trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
2474         /*
2475          * If we have a partial cluster, and it's different from the
2476          * cluster of the last block, we need to explicitly free the
2477          * partial cluster here.
2478          */
2479         pblk = ext4_ext_pblock(ex) + ee_len - 1;
2480         if ((*partial_cluster > 0) &&
2481             (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
2482                 ext4_free_blocks(handle, inode, NULL,
2483                                  EXT4_C2B(sbi, *partial_cluster),
2484                                  sbi->s_cluster_ratio, flags);
2485                 *partial_cluster = 0;
2486         }
2487
2488 #ifdef EXTENTS_STATS
2489         {
2490                 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2491                 spin_lock(&sbi->s_ext_stats_lock);
2492                 sbi->s_ext_blocks += ee_len;
2493                 sbi->s_ext_extents++;
2494                 if (ee_len < sbi->s_ext_min)
2495                         sbi->s_ext_min = ee_len;
2496                 if (ee_len > sbi->s_ext_max)
2497                         sbi->s_ext_max = ee_len;
2498                 if (ext_depth(inode) > sbi->s_depth_max)
2499                         sbi->s_depth_max = ext_depth(inode);
2500                 spin_unlock(&sbi->s_ext_stats_lock);
2501         }
2502 #endif
2503         if (from >= le32_to_cpu(ex->ee_block)
2504             && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2505                 /* tail removal */
2506                 ext4_lblk_t num;
2507                 unsigned int unaligned;
2508
2509                 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2510                 pblk = ext4_ext_pblock(ex) + ee_len - num;
2511                 /*
2512                  * Usually we want to free the partial cluster at the end of
2513                  * the extent, except when the cluster is still used by
2514                  * another extent (partial_cluster is negative).
2515                  */
2516                 if (*partial_cluster < 0 &&
2517                     -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1))
2518                         flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
2519
2520                 ext_debug("free last %u blocks starting %llu partial %lld\n",
2521                           num, pblk, *partial_cluster);
2522                 ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2523                 /*
2524                  * If the block range to be freed didn't start at the
2525                  * beginning of a cluster, and we removed the entire
2526                  * extent and the cluster is not used by any other extent,
2527                  * save the partial cluster here, since we might need to
2528                  * free it if we determine that the truncate operation has
2529                  * removed all of the blocks in the cluster.
2530                  *
2531                  * On the other hand, if we did not manage to free the whole
2532                  * extent, we have to mark the cluster as used (store negative
2533                  * cluster number in partial_cluster).
2534                  */
2535                 unaligned = EXT4_PBLK_COFF(sbi, pblk);
2536                 if (unaligned && (ee_len == num) &&
2537                     (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
2538                         *partial_cluster = EXT4_B2C(sbi, pblk);
2539                 else if (unaligned)
2540                         *partial_cluster = -((long long)EXT4_B2C(sbi, pblk));
2541                 else if (*partial_cluster > 0)
2542                         *partial_cluster = 0;
2543         } else
2544                 ext4_error(sbi->s_sb, "strange request: removal(2) "
2545                            "%u-%u from %u:%u\n",
2546                            from, to, le32_to_cpu(ex->ee_block), ee_len);
2547         return 0;
2548 }
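
/*
 * Worked example (illustrative, assuming bigalloc with s_cluster_ratio
 * == 4): ex covers logical blocks 10..25 at physical 1010..1025 and we
 * remove from = 18, to = 25.  That is a tail removal with num = 8 and
 * pblk = 1018; since 1018 is not cluster aligned and blocks 1016..1017
 * of its cluster stay allocated, *partial_cluster is set to
 * -EXT4_B2C(sbi, 1018), i.e. -254, marking the cluster as still in use.
 */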
2549
2550
2551 /*
2552  * ext4_ext_rm_leaf() removes the extents associated with the
2553  * blocks appearing between "start" and "end", and splits the extents
2554  * if "start" and "end" appear in the same extent
2555  *
2556  * @handle: The journal handle
2557  * @inode:  The file's inode
2558  * @path:   The path to the leaf
2559  * @partial_cluster: The cluster which we'll have to free if all extents
2560  *                   have been released from it. It gets negative in case
2561  *                   the cluster is still used.
2562  * @start:  The first block to remove
2563  * @end:    The last block to remove
2564  */
2565 static int
2566 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2567                  struct ext4_ext_path *path,
2568                  long long *partial_cluster,
2569                  ext4_lblk_t start, ext4_lblk_t end)
2570 {
2571         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2572         int err = 0, correct_index = 0;
2573         int depth = ext_depth(inode), credits;
2574         struct ext4_extent_header *eh;
2575         ext4_lblk_t a, b;
2576         unsigned num;
2577         ext4_lblk_t ex_ee_block;
2578         unsigned short ex_ee_len;
2579         unsigned uninitialized = 0;
2580         struct ext4_extent *ex;
2581         ext4_fsblk_t pblk;
2582
2583         /* the header must already have been checked in ext4_ext_remove_space() */
2584         ext_debug("truncate since %u in leaf to %u\n", start, end);
2585         if (!path[depth].p_hdr)
2586                 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2587         eh = path[depth].p_hdr;
2588         if (unlikely(path[depth].p_hdr == NULL)) {
2589                 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2590                 return -EIO;
2591         }
2592         /* find where to start removing */
2593         ex = path[depth].p_ext;
2594         if (!ex)
2595                 ex = EXT_LAST_EXTENT(eh);
2596
2597         ex_ee_block = le32_to_cpu(ex->ee_block);
2598         ex_ee_len = ext4_ext_get_actual_len(ex);
2599
2600         /*
2601          * If we're starting with an extent other than the last one in the
2602          * node, we need to see if it shares a cluster with the extent to
2603          * the right (towards the end of the file). If its leftmost cluster
2604          * is this extent's rightmost cluster and it is not cluster aligned,
2605          * we'll mark it as a partial that is not to be deallocated.
2606          */
2607
2608         if (ex != EXT_LAST_EXTENT(eh)) {
2609                 ext4_fsblk_t current_pblk, right_pblk;
2610                 long long current_cluster, right_cluster;
2611
2612                 current_pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
2613                 current_cluster = (long long)EXT4_B2C(sbi, current_pblk);
2614                 right_pblk = ext4_ext_pblock(ex + 1);
2615                 right_cluster = (long long)EXT4_B2C(sbi, right_pblk);
2616                 if (current_cluster == right_cluster &&
2617                         EXT4_PBLK_COFF(sbi, right_pblk))
2618                         *partial_cluster = -right_cluster;
2619         }
2620
2621         trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2622
2623         while (ex >= EXT_FIRST_EXTENT(eh) &&
2624                         ex_ee_block + ex_ee_len > start) {
2625
2626                 if (ext4_ext_is_uninitialized(ex))
2627                         uninitialized = 1;
2628                 else
2629                         uninitialized = 0;
2630
2631                 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2632                          uninitialized, ex_ee_len);
2633                 path[depth].p_ext = ex;
2634
2635                 a = ex_ee_block > start ? ex_ee_block : start;
2636                 b = ex_ee_block+ex_ee_len - 1 < end ?
2637                         ex_ee_block+ex_ee_len - 1 : end;
2638
2639                 ext_debug("  border %u:%u\n", a, b);
2640
2641                 /* If this extent is beyond the end of the hole, skip it */
2642                 if (end < ex_ee_block) {
2643                         /*
2644                          * We're going to skip this extent and move to another,
2645                          * so if this extent is not cluster aligned we have
2646                          * to mark the current cluster as used to avoid
2647                          * accidentally freeing it later on
2648                          */
2649                         pblk = ext4_ext_pblock(ex);
2650                         if (EXT4_PBLK_COFF(sbi, pblk))
2651                                 *partial_cluster =
2652                                         -((long long)EXT4_B2C(sbi, pblk));
2653                         ex--;
2654                         ex_ee_block = le32_to_cpu(ex->ee_block);
2655                         ex_ee_len = ext4_ext_get_actual_len(ex);
2656                         continue;
2657                 } else if (b != ex_ee_block + ex_ee_len - 1) {
2658                         EXT4_ERROR_INODE(inode,
2659                                          "can not handle truncate %u:%u "
2660                                          "on extent %u:%u",
2661                                          start, end, ex_ee_block,
2662                                          ex_ee_block + ex_ee_len - 1);
2663                         err = -EIO;
2664                         goto out;
2665                 } else if (a != ex_ee_block) {
2666                         /* remove tail of the extent */
2667                         num = a - ex_ee_block;
2668                 } else {
2669                         /* remove whole extent: excellent! */
2670                         num = 0;
2671                 }
2672                 /*
2673                  * 3 for leaf, sb, and inode plus 2 (bmap and group
2674                  * descriptor) for each block group; assume two block
2675                  * groups plus ex_ee_len/blocks_per_block_group for
2676                  * the worst case
2677                  */
2678                 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2679                 if (ex == EXT_FIRST_EXTENT(eh)) {
2680                         correct_index = 1;
2681                         credits += (ext_depth(inode)) + 1;
2682                 }
2683                 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2684
2685                 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2686                 if (err)
2687                         goto out;
2688
2689                 err = ext4_ext_get_access(handle, inode, path + depth);
2690                 if (err)
2691                         goto out;
2692
2693                 err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
2694                                          a, b);
2695                 if (err)
2696                         goto out;
2697
2698                 if (num == 0)
2699                         /* this extent is removed; mark slot entirely unused */
2700                         ext4_ext_store_pblock(ex, 0);
2701
2702                 ex->ee_len = cpu_to_le16(num);
2703                 /*
2704                  * Do not mark uninitialized if all the blocks in the
2705                  * extent have been removed.
2706                  */
2707                 if (uninitialized && num)
2708                         ext4_ext_mark_uninitialized(ex);
2709                 /*
2710                  * If the extent was completely released,
2711                  * we need to remove it from the leaf
2712                  */
2713                 if (num == 0) {
2714                         if (end != EXT_MAX_BLOCKS - 1) {
2715                                 /*
2716                                  * For hole punching, we need to scoot all the
2717                                  * extents up when an extent is removed so that
2718                                  * we don't have blank extents in the middle
2719                                  */
2720                                 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2721                                         sizeof(struct ext4_extent));
2722
2723                                 /* Now get rid of the one at the end */
2724                                 memset(EXT_LAST_EXTENT(eh), 0,
2725                                         sizeof(struct ext4_extent));
2726                         }
2727                         le16_add_cpu(&eh->eh_entries, -1);
2728                 } else if (*partial_cluster > 0)
2729                         *partial_cluster = 0;
2730
2731                 err = ext4_ext_dirty(handle, inode, path + depth);
2732                 if (err)
2733                         goto out;
2734
2735                 ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2736                                 ext4_ext_pblock(ex));
2737                 ex--;
2738                 ex_ee_block = le32_to_cpu(ex->ee_block);
2739                 ex_ee_len = ext4_ext_get_actual_len(ex);
2740         }
2741
2742         if (correct_index && eh->eh_entries)
2743                 err = ext4_ext_correct_indexes(handle, inode, path);
2744
2745         /*
2746          * If there's a partial cluster and at least one extent remains in
2747          * the leaf, free the partial cluster if it isn't shared with the
2748          * current extent.  If there's a partial cluster and no extents
2749          * remain in the leaf, it can't be freed here.  It can only be
2750          * freed when it's possible to determine if it's not shared with
2751          * any other extent - when the next leaf is processed or when space
2752          * removal is complete.
2753          * removal is complete.
          */
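         /*
          * Sign convention used around this check: a positive
          * *partial_cluster names a candidate cluster still to be
          * freed, while a negative value appears to mark a cluster
          * known to be shared with another extent, which must not be
          * freed here.
          */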
2754         if (*partial_cluster > 0 && eh->eh_entries &&
2755             (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
2756              *partial_cluster)) {
2757                 int flags = get_default_free_blocks_flags(inode);
2758
2759                 ext4_free_blocks(handle, inode, NULL,
2760                                  EXT4_C2B(sbi, *partial_cluster),
2761                                  sbi->s_cluster_ratio, flags);
2762                 *partial_cluster = 0;
2763         }
2764
2765         /* if this leaf is now free, then we should
2766          * remove it from the index block above */
2767         if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2768                 err = ext4_ext_rm_idx(handle, inode, path, depth);
2769
2770 out:
2771         return err;
2772 }
2773
2774 /*
2775  * ext4_ext_more_to_rm:
2776  * returns 1 if current index has to be freed (even partial)
2777  * returns 1 if the current index has to be freed (even partially)
2778 static int
2779 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2780 {
2781         BUG_ON(path->p_idx == NULL);
2782
2783         if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2784                 return 0;
2785
2786         /*
2787          * if truncation at a deeper level happened, it wasn't partial,
2788          * so we have to consider the current index for truncation
2789          */
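         /*
          * Reading of the check below: p_block appears to cache
          * eh_entries as saved just before descending (see
          * ext4_ext_remove_space()); an unchanged count means the
          * child index survived a partial removal, so nothing further
          * needs freeing at this level.
          */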
2790         if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2791                 return 0;
2792         return 1;
2793 }
2794
2795 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2796                           ext4_lblk_t end)
2797 {
2798         struct super_block *sb = inode->i_sb;
2799         int depth = ext_depth(inode);
2800         struct ext4_ext_path *path = NULL;
2801         long long partial_cluster = 0;
2802         handle_t *handle;
2803         int i = 0, err = 0;
2804
2805         ext_debug("truncate since %u to %u\n", start, end);
2806
2807         /* the first extent we free will probably be the last one in its block */
2808         handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
2809         if (IS_ERR(handle))
2810                 return PTR_ERR(handle);
2811
2812 again:
2813         trace_ext4_ext_remove_space(inode, start, end, depth);
2814
2815         /*
2816          * Check if we are removing extents inside the extent tree. If that
2817          * is the case, we are going to punch a hole inside the extent tree
2818          * so we have to check whether we need to split the extent covering
2819          * the last block to remove so we can easily remove the part of it
2820          * in ext4_ext_rm_leaf().
2821          */
2822         if (end < EXT_MAX_BLOCKS - 1) {
2823                 struct ext4_extent *ex;
2824                 ext4_lblk_t ee_block;
2825
2826                 /* find extent for this block */
2827                 path = ext4_ext_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
2828                 if (IS_ERR(path)) {
2829                         ext4_journal_stop(handle);
2830                         return PTR_ERR(path);
2831                 }
2832                 depth = ext_depth(inode);
2833                 /* The leaf may be missing only if the inode has no blocks at all */
2834                 ex = path[depth].p_ext;
2835                 if (!ex) {
2836                         if (depth) {
2837                                 EXT4_ERROR_INODE(inode,
2838                                                  "path[%d].p_hdr == NULL",
2839                                                  depth);
2840                                 err = -EIO;
2841                         }
2842                         goto out;
2843                 }
2844
2845                 ee_block = le32_to_cpu(ex->ee_block);
2846
2847                 /*
2848                  * See if the last block is inside the extent, if so split
2849                  * the extent at 'end' block so we can easily remove the
2850                  * tail of the first part of the split extent in
2851                  * ext4_ext_rm_leaf().
2852                  */
2853                 if (end >= ee_block &&
2854                     end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
2855                         int split_flag = 0;
2856
2857                         if (ext4_ext_is_uninitialized(ex))
2858                                 split_flag = EXT4_EXT_MARK_UNINIT1 |
2859                                              EXT4_EXT_MARK_UNINIT2;
2860
2861                         /*
2862                          * Split the extent in two so that 'end' is the last
2863                          * block in the first new extent. Also we should not
2864                          * fail removing space due to ENOSPC so try to use
2865                          * reserved block if that happens.
2866                          */
2867                         err = ext4_split_extent_at(handle, inode, path,
2868                                         end + 1, split_flag,
2869                                         EXT4_EX_NOCACHE |
2870                                         EXT4_GET_BLOCKS_PRE_IO |
2871                                         EXT4_GET_BLOCKS_METADATA_NOFAIL);
2872
2873                         if (err < 0)
2874                                 goto out;
2875                 }
2876         }
2877         /*
2878          * We start scanning from right side, freeing all the blocks
2879          * after i_size and walking into the tree depth-wise.
2880          */
2881         depth = ext_depth(inode);
2882         if (path) {
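                /*
                 * Reuse the path left over from the split lookup above:
                 * setting p_block one past eh_entries makes
                 * ext4_ext_more_to_rm() treat every level as still
                 * having entries to process.
                 */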
2883                 int k = i = depth;
2884                 while (--k > 0)
2885                         path[k].p_block =
2886                                 le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2887         } else {
2888                 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
2889                                GFP_NOFS);
2890                 if (path == NULL) {
2891                         ext4_journal_stop(handle);
2892                         return -ENOMEM;
2893                 }
2894                 path[0].p_depth = depth;
2895                 path[0].p_hdr = ext_inode_hdr(inode);
2896                 i = 0;
2897
2898                 if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
2899                         err = -EIO;
2900                         goto out;
2901                 }
2902         }
2903         err = 0;
2904
2905         while (i >= 0 && err == 0) {
2906                 if (i == depth) {
2907                         /* this is leaf block */
2908                         err = ext4_ext_rm_leaf(handle, inode, path,
2909                                                &partial_cluster, start,
2910                                                end);
2911                         /* root level has p_bh == NULL, brelse() eats this */
2912                         brelse(path[i].p_bh);
2913                         path[i].p_bh = NULL;
2914                         i--;
2915                         continue;
2916                 }
2917
2918                 /* this is index block */
2919                 if (!path[i].p_hdr) {
2920                         ext_debug("initialize header\n");
2921                         path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2922                 }
2923
2924                 if (!path[i].p_idx) {
2925                         /* this level hasn't been touched yet */
2926                         path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2927                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2928                         ext_debug("init index ptr: hdr 0x%p, num %d\n",
2929                                   path[i].p_hdr,
2930                                   le16_to_cpu(path[i].p_hdr->eh_entries));
2931                 } else {
2932                         /* we were already here, move on to the next index */
2933                         path[i].p_idx--;
2934                 }
2935
2936                 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2937                                 i, EXT_FIRST_INDEX(path[i].p_hdr),
2938                                 path[i].p_idx);
2939                 if (ext4_ext_more_to_rm(path + i)) {
2940                         struct buffer_head *bh;
2941                         /* go to the next level */
2942                         ext_debug("move to level %d (block %llu)\n",
2943                                   i + 1, ext4_idx_pblock(path[i].p_idx));
2944                         memset(path + i + 1, 0, sizeof(*path));
2945                         bh = read_extent_tree_block(inode,
2946                                 ext4_idx_pblock(path[i].p_idx), depth - i - 1,
2947                                 EXT4_EX_NOCACHE);
2948                         if (IS_ERR(bh)) {
2949                                 /* should we reset i_size? */
2950                                 err = PTR_ERR(bh);
2951                                 break;
2952                         }
2953                         /* Yield here to deal with large extent trees.
2954                          * Should be a no-op if we did IO above. */
2955                         cond_resched();
2956                         if (WARN_ON(i + 1 > depth)) {
2957                                 err = -EIO;
2958                                 break;
2959                         }
2960                         path[i + 1].p_bh = bh;
2961
2962                         /* save the actual number of index entries, since
2963                          * this number changes at the next iteration */
2964                         path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2965                         i++;
2966                 } else {
2967                         /* we finished processing this index, go up */
2968                         if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2969                                 /* index is empty, remove it;
2970                                  * the handle has already been extended
2971                                  * while truncating the leaf */
2972                                 err = ext4_ext_rm_idx(handle, inode, path, i);
2973                         }
2974                         /* root level has p_bh == NULL, brelse() eats this */
2975                         brelse(path[i].p_bh);
2976                         path[i].p_bh = NULL;
2977                         i--;
2978                         ext_debug("return to level %d\n", i);
2979                 }
2980         }
2981
2982         trace_ext4_ext_remove_space_done(inode, start, end, depth,
2983                         partial_cluster, path->p_hdr->eh_entries);
2984
2985         /* If we still have something in the partial cluster and we have removed
2986          * all extents (including the first), then we should free the blocks in
2987          * the partial cluster as well. */
2988         if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) {
2989                 int flags = get_default_free_blocks_flags(inode);
2990
2991                 ext4_free_blocks(handle, inode, NULL,
2992                                  EXT4_C2B(EXT4_SB(sb), partial_cluster),
2993                                  EXT4_SB(sb)->s_cluster_ratio, flags);
2994                 partial_cluster = 0;
2995         }
2996
2997         /* TODO: flexible tree reduction should be here */
2998         if (path->p_hdr->eh_entries == 0) {
2999                 /*
3000                  * truncate to zero freed all the tree,
3001                  * so we need to correct eh_depth
3002                  */
3003                 err = ext4_ext_get_access(handle, inode, path);
3004                 if (err == 0) {
3005                         ext_inode_hdr(inode)->eh_depth = 0;
3006                         ext_inode_hdr(inode)->eh_max =
3007                                 cpu_to_le16(ext4_ext_space_root(inode, 0));
3008                         err = ext4_ext_dirty(handle, inode, path);
3009                 }
3010         }
3011 out:
3012         ext4_ext_drop_refs(path);
3013         kfree(path);
3014         if (err == -EAGAIN) {
3015                 path = NULL;
3016                 goto again;
3017         }
3018         ext4_journal_stop(handle);
3019
3020         return err;
3021 }
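/*
 * For orientation (callers elsewhere in ext4): a full truncate passes
 * end = EXT_MAX_BLOCKS - 1 so that everything from 'start' to the end
 * of the file is removed, while hole punching passes the last logical
 * block of the hole as 'end'.
 */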
3022
3023 /*
3024  * called at mount time
3025  */
3026 void ext4_ext_init(struct super_block *sb)
3027 {
3028         /*
3029          * possible initialization would be here
3030          */
3031
3032         if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
3033 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
3034                 printk(KERN_INFO "EXT4-fs: file extents enabled"
3035 #ifdef AGGRESSIVE_TEST
3036                        ", aggressive tests"
3037 #endif
3038 #ifdef CHECK_BINSEARCH
3039                        ", check binsearch"
3040 #endif
3041 #ifdef EXTENTS_STATS
3042                        ", stats"
3043 #endif
3044                        "\n");
3045 #endif
3046 #ifdef EXTENTS_STATS
3047                 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
3048                 EXT4_SB(sb)->s_ext_min = 1 << 30;
3049                 EXT4_SB(sb)->s_ext_max = 0;
3050 #endif
3051         }
3052 }
3053
3054 /*
3055  * called at umount time
3056  */
3057 void ext4_ext_release(struct super_block *sb)
3058 {
3059         if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
3060                 return;
3061
3062 #ifdef EXTENTS_STATS
3063         if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
3064                 struct ext4_sb_info *sbi = EXT4_SB(sb);
3065                 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
3066                         sbi->s_ext_blocks, sbi->s_ext_extents,
3067                         sbi->s_ext_blocks / sbi->s_ext_extents);
3068                 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
3069                         sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
3070         }
3071 #endif
3072 }
3073
3074 static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
3075 {
3076         ext4_lblk_t  ee_block;
3077         ext4_fsblk_t ee_pblock;
3078         unsigned int ee_len;
3079
3080         ee_block  = le32_to_cpu(ex->ee_block);
3081         ee_len    = ext4_ext_get_actual_len(ex);
3082         ee_pblock = ext4_ext_pblock(ex);
3083
3084         if (ee_len == 0)
3085                 return 0;
3086
3087         return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
3088                                      EXTENT_STATUS_WRITTEN);
3089 }
3090
3091 /* FIXME!! we need to try to merge to left or right after zero-out  */
3092 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3093 {
3094         ext4_fsblk_t ee_pblock;
3095         unsigned int ee_len;
3096         int ret;
3097
3098         ee_len    = ext4_ext_get_actual_len(ex);
3099         ee_pblock = ext4_ext_pblock(ex);
3100
3101         ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
3102         if (ret > 0)
3103                 ret = 0;
3104
3105         return ret;
3106 }
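/*
 * Note: sb_issue_zeroout() hands the range to the block layer
 * (blkdev_issue_zeroout()); the 'ret > 0' check above normalizes any
 * positive return to success, since callers only care about failures.
 */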
3107
3108 /*
3109  * ext4_split_extent_at() splits an extent at given block.
3110  *
3111  * @handle: the journal handle
3112  * @inode: the file inode
3113  * @path: the path to the extent
3114  * @split: the logical block where the extent is split.
3115  * @split_flag: indicates whether the extent may be zeroed out if the split
3116  *              fails, and the states (initialized or uninitialized) of the
3117  *              new extents.
3118  * @flags: flags used to insert the new extent into the extent tree.
3119  *
3120  * Splits extent [a, b] into two extents [a, @split) and [@split, b], the
3121  * states of which are determined by @split_flag.
3122  *
3123  * There are two cases:
3124  *  a> the extent is split into two extents.
3125  *  b> no split is needed, and the extent is just marked.
3126  *
3127  * return 0 on success.
3128  */
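/*
 * Example with illustrative numbers: splitting an extent covering
 * logical blocks [100, 200) at block 150 yields [100, 150) and
 * [150, 200); the second half starts 50 blocks into the original
 * physical range, matching the computation of 'newblock' below.
 */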
3129 static int ext4_split_extent_at(handle_t *handle,
3130                              struct inode *inode,
3131                              struct ext4_ext_path *path,
3132                              ext4_lblk_t split,
3133                              int split_flag,
3134                              int flags)
3135 {
3136         ext4_fsblk_t newblock;
3137         ext4_lblk_t ee_block;
3138         struct ext4_extent *ex, newex, orig_ex, zero_ex;
3139         struct ext4_extent *ex2 = NULL;
3140         unsigned int ee_len, depth;
3141         int err = 0;
3142
3143         BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
3144                (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
3145
3146         ext_debug("ext4_split_extent_at: inode %lu, logical "
3147                 "block %llu\n", inode->i_ino, (unsigned long long)split);
3148
3149         ext4_ext_show_leaf(inode, path);
3150
3151         depth = ext_depth(inode);
3152         ex = path[depth].p_ext;
3153         ee_block = le32_to_cpu(ex->ee_block);
3154         ee_len = ext4_ext_get_actual_len(ex);
3155         newblock = split - ee_block + ext4_ext_pblock(ex);
3156
3157         BUG_ON(split < ee_block || split >= (ee_block + ee_len));
3158         BUG_ON(!ext4_ext_is_uninitialized(ex) &&
3159                split_flag & (EXT4_EXT_MAY_ZEROOUT |
3160                              EXT4_EXT_MARK_UNINIT1 |
3161                              EXT4_EXT_MARK_UNINIT2));
3162
3163         err = ext4_ext_get_access(handle, inode, path + depth);
3164         if (err)
3165                 goto out;
3166
3167         if (split == ee_block) {
3168                 /*
3169                  * case b: block @split is the block that the extent begins
3170                  * with, so we just change the state of the extent; splitting
3171                  * is not needed.
3172                  */
3173                 if (split_flag & EXT4_EXT_MARK_UNINIT2)
3174                         ext4_ext_mark_uninitialized(ex);
3175                 else
3176                         ext4_ext_mark_initialized(ex);
3177
3178                 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3179                         ext4_ext_try_to_merge(handle, inode, path, ex);
3180
3181                 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3182                 goto out;
3183         }
3184
3185         /* case a */
3186         memcpy(&orig_ex, ex, sizeof(orig_ex));
3187         ex->ee_len = cpu_to_le16(split - ee_block);
3188         if (split_flag & EXT4_EXT_MARK_UNINIT1)
3189                 ext4_ext_mark_uninitialized(ex);
3190
3191         /*
3192          * the path may lead to a new leaf, not to the original leaf,
3193          * after ext4_ext_insert_extent() returns
3194          */
3195         err = ext4_ext_dirty(handle, inode, path + depth);
3196         if (err)
3197                 goto fix_extent_len;
3198
3199         ex2 = &newex;
3200         ex2->ee_block = cpu_to_le32(split);
3201         ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
3202         ext4_ext_store_pblock(ex2, newblock);
3203         if (split_flag & EXT4_EXT_MARK_UNINIT2)
3204                 ext4_ext_mark_uninitialized(ex2);
3205
3206         err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3207         if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3208                 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3209                         if (split_flag & EXT4_EXT_DATA_VALID1) {
3210                                 err = ext4_ext_zeroout(inode, ex2);
3211                                 zero_ex.ee_block = ex2->ee_block;
3212                                 zero_ex.ee_len = cpu_to_le16(
3213                                                 ext4_ext_get_actual_len(ex2));
3214                                 ext4_ext_store_pblock(&zero_ex,
3215                                                       ext4_ext_pblock(ex2));
3216                         } else {
3217                                 err = ext4_ext_zeroout(inode, ex);
3218                                 zero_ex.ee_block = ex->ee_block;
3219                                 zero_ex.ee_len = cpu_to_le16(
3220                                                 ext4_ext_get_actual_len(ex));
3221                                 ext4_ext_store_pblock(&zero_ex,
3222                                                       ext4_ext_pblock(ex));
3223                         }
3224                 } else {
3225                         err = ext4_ext_zeroout(inode, &orig_ex);
3226                         zero_ex.ee_block = orig_ex.ee_block;
3227                         zero_ex.ee_len = cpu_to_le16(
3228                                                 ext4_ext_get_actual_len(&orig_ex));
3229                         ext4_ext_store_pblock(&zero_ex,
3230                                               ext4_ext_pblock(&orig_ex));
3231                 }
3232
3233                 if (err)
3234                         goto fix_extent_len;
3235                 /* update the extent length and mark as initialized */
3236                 ex->ee_len = cpu_to_le16(ee_len);
3237                 ext4_ext_try_to_merge(handle, inode, path, ex);
3238                 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3239                 if (err)
3240                         goto fix_extent_len;
3241
3242                 /* update extent status tree */
3243                 err = ext4_zeroout_es(inode, &zero_ex);
3244
3245                 goto out;
3246         } else if (err)
3247                 goto fix_extent_len;
3248
3249 out:
3250         ext4_ext_show_leaf(inode, path);
3251         return err;
3252
3253 fix_extent_len:
3254         ex->ee_len = orig_ex.ee_len;
3255         ext4_ext_dirty(handle, inode, path + depth);
3256         return err;
3257 }
3258
3259 /*
3260  * ext4_split_extent() splits an extent and marks the extent covered
3261  * by @map as @split_flag indicates.
3262  *
3263  * It may result in splitting the extent into multiple extents (up to three).
3264  * There are three possibilities:
3265  *   a> There is no split required
3266  *   b> Splits in two extents: Split is happening at either end of the extent
3267  *   c> Splits in three extents: Someone is splitting in the middle of the extent
3268  *
3269  */
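/*
 * Example with illustrative numbers: with @map covering [120, 140)
 * inside an extent [100, 200), the code below first splits at 140 and
 * then at 120, leaving [100, 120), [120, 140) and [140, 200).
 */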
3270 static int ext4_split_extent(handle_t *handle,
3271                               struct inode *inode,
3272                               struct ext4_ext_path *path,
3273                               struct ext4_map_blocks *map,
3274                               int split_flag,
3275                               int flags)
3276 {
3277         ext4_lblk_t ee_block;
3278         struct ext4_extent *ex;
3279         unsigned int ee_len, depth;
3280         int err = 0;
3281         int uninitialized;
3282         int split_flag1, flags1;
3283         int allocated = map->m_len;
3284
3285         depth = ext_depth(inode);
3286         ex = path[depth].p_ext;
3287         ee_block = le32_to_cpu(ex->ee_block);
3288         ee_len = ext4_ext_get_actual_len(ex);
3289         uninitialized = ext4_ext_is_uninitialized(ex);
3290
3291         if (map->m_lblk + map->m_len < ee_block + ee_len) {
3292                 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3293                 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3294                 if (uninitialized)
3295                         split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
3296                                        EXT4_EXT_MARK_UNINIT2;
3297                 if (split_flag & EXT4_EXT_DATA_VALID2)
3298                         split_flag1 |= EXT4_EXT_DATA_VALID1;
3299                 err = ext4_split_extent_at(handle, inode, path,
3300                                 map->m_lblk + map->m_len, split_flag1, flags1);
3301                 if (err)
3302                         goto out;
3303         } else {
3304                 allocated = ee_len - (map->m_lblk - ee_block);
3305         }
3306         /*
3307          * Update path is required because previous ext4_split_extent_at() may
3308          * result in split of original leaf or extent zeroout.
3309          */
3310         ext4_ext_drop_refs(path);
3311         path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
3312         if (IS_ERR(path))
3313                 return PTR_ERR(path);
3314         depth = ext_depth(inode);
3315         ex = path[depth].p_ext;
3316         uninitialized = ext4_ext_is_uninitialized(ex);
3317         split_flag1 = 0;
3318
3319         if (map->m_lblk >= ee_block) {
3320                 split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
3321                 if (uninitialized) {
3322                         split_flag1 |= EXT4_EXT_MARK_UNINIT1;
3323                         split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3324                                                      EXT4_EXT_MARK_UNINIT2);
3325                 }
3326                 err = ext4_split_extent_at(handle, inode, path,
3327                                 map->m_lblk, split_flag1, flags);
3328                 if (err)
3329                         goto out;
3330         }
3331
3332         ext4_ext_show_leaf(inode, path);
3333 out:
3334         return err ? err : allocated;
3335 }
3336
3337 /*
3338  * This function is called by ext4_ext_map_blocks() if someone tries to write
3339  * to an uninitialized extent. It may result in splitting the uninitialized
3340  * extent into multiple extents (up to three - one initialized and two
3341  * uninitialized).
3342  * There are three possibilities:
3343  *   a> There is no split required: Entire extent should be initialized
3344  *   b> Splits in two extents: Write is happening at either end of the extent
3345  *   c> Splits in three extents: Someone is writing in the middle of the extent
3346  *
3347  * Pre-conditions:
3348  *  - The extent pointed to by 'path' is uninitialized.
3349  *  - The extent pointed to by 'path' contains a superset
3350  *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3351  *
3352  * Post-conditions on success:
3353  *  - the returned value is the number of blocks beyond map->m_lblk
3354  *    that are allocated and initialized.
3355  *    It is guaranteed to be >= map->m_len.
3356  */
3357 static int ext4_ext_convert_to_initialized(handle_t *handle,
3358                                            struct inode *inode,
3359                                            struct ext4_map_blocks *map,
3360                                            struct ext4_ext_path *path,
3361                                            int flags)
3362 {
3363         struct ext4_sb_info *sbi;
3364         struct ext4_extent_header *eh;
3365         struct ext4_map_blocks split_map;
3366         struct ext4_extent zero_ex;
3367         struct ext4_extent *ex, *abut_ex;
3368         ext4_lblk_t ee_block, eof_block;
3369         unsigned int ee_len, depth, map_len = map->m_len;
3370         int allocated = 0, max_zeroout = 0;
3371         int err = 0;
3372         int split_flag = 0;
3373
3374         ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
3375                 "block %llu, max_blocks %u\n", inode->i_ino,
3376                 (unsigned long long)map->m_lblk, map_len);
3377
3378         sbi = EXT4_SB(inode->i_sb);
3379         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3380                 inode->i_sb->s_blocksize_bits;
3381         if (eof_block < map->m_lblk + map_len)
3382                 eof_block = map->m_lblk + map_len;
3383
3384         depth = ext_depth(inode);
3385         eh = path[depth].p_hdr;
3386         ex = path[depth].p_ext;
3387         ee_block = le32_to_cpu(ex->ee_block);
3388         ee_len = ext4_ext_get_actual_len(ex);
3389         zero_ex.ee_len = 0;
3390
3391         trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3392
3393         /* Pre-conditions */
3394         BUG_ON(!ext4_ext_is_uninitialized(ex));
3395         BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3396
3397         /*
3398          * Attempt to transfer newly initialized blocks from the currently
3399          * uninitialized extent to its neighbor. This is much cheaper
3400          * than an insertion followed by a merge as those involve costly
3401          * memmove() calls. Transferring to the left is the common case in
3402          * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
3403          * followed by append writes.
3404          *
3405          * Limitations of the current logic:
3406          *  - L1: we do not deal with writes covering the whole extent.
3407          *    This would require removing the extent if the transfer
3408          *    is possible.
3409          *  - L2: we only attempt to merge with an extent stored in the
3410          *    same extent tree node.
3411          */
3412         if ((map->m_lblk == ee_block) &&
3413                 /* See if we can merge left */
3414                 (map_len < ee_len) &&           /*L1*/
3415                 (ex > EXT_FIRST_EXTENT(eh))) {  /*L2*/
3416                 ext4_lblk_t prev_lblk;
3417                 ext4_fsblk_t prev_pblk, ee_pblk;
3418                 unsigned int prev_len;
3419
3420                 abut_ex = ex - 1;
3421                 prev_lblk = le32_to_cpu(abut_ex->ee_block);
3422                 prev_len = ext4_ext_get_actual_len(abut_ex);
3423                 prev_pblk = ext4_ext_pblock(abut_ex);
3424                 ee_pblk = ext4_ext_pblock(ex);
3425
3426                 /*
3427                  * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3428                  * upon those conditions:
3429                  * - C1: abut_ex is initialized,
3430                  * - C2: abut_ex is logically abutting ex,
3431                  * - C3: abut_ex is physically abutting ex,
3432                  * - C4: abut_ex can receive the additional blocks without
3433                  *   overflowing the (initialized) length limit.
3434                  */
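                /*
                 * Illustrative transfer: with abut_ex = [50, 60)
                 * initialized and ex = [60, 80) uninitialized, writing
                 * map = [60, 64) shrinks ex to [64, 80) and grows
                 * abut_ex to [50, 64), shifting the physical start of
                 * ex by the same four blocks.
                 */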
3435                 if ((!ext4_ext_is_uninitialized(abut_ex)) &&            /*C1*/
3436                         ((prev_lblk + prev_len) == ee_block) &&         /*C2*/
3437                         ((prev_pblk + prev_len) == ee_pblk) &&          /*C3*/
3438                         (prev_len < (EXT_INIT_MAX_LEN - map_len))) {    /*C4*/
3439                         err = ext4_ext_get_access(handle, inode, path + depth);
3440                         if (err)
3441                                 goto out;
3442
3443                         trace_ext4_ext_convert_to_initialized_fastpath(inode,
3444                                 map, ex, abut_ex);
3445
3446                         /* Shift the start of ex by 'map_len' blocks */
3447                         ex->ee_block = cpu_to_le32(ee_block + map_len);
3448                         ext4_ext_store_pblock(ex, ee_pblk + map_len);
3449                         ex->ee_len = cpu_to_le16(ee_len - map_len);
3450                         ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3451
3452                         /* Extend abut_ex by 'map_len' blocks */
3453                         abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
3454
3455                         /* Result: number of initialized blocks past m_lblk */
3456                         allocated = map_len;
3457                 }
3458         } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
3459                    (map_len < ee_len) &&        /*L1*/
3460                    ex < EXT_LAST_EXTENT(eh)) {  /*L2*/
3461                 /* See if we can merge right */
3462                 ext4_lblk_t next_lblk;
3463                 ext4_fsblk_t next_pblk, ee_pblk;
3464                 unsigned int next_len;
3465
3466                 abut_ex = ex + 1;
3467                 next_lblk = le32_to_cpu(abut_ex->ee_block);
3468                 next_len = ext4_ext_get_actual_len(abut_ex);
3469                 next_pblk = ext4_ext_pblock(abut_ex);
3470                 ee_pblk = ext4_ext_pblock(ex);
3471
3472                 /*
3473                  * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3474                  * upon those conditions:
3475                  * - C1: abut_ex is initialized,
3476                  * - C2: abut_ex is logically abutting ex,
3477                  * - C3: abut_ex is physically abutting ex,
3478                  * - C4: abut_ex can receive the additional blocks without
3479                  *   overflowing the (initialized) length limit.
3480                  */
3481                 if ((!ext4_ext_is_uninitialized(abut_ex)) &&            /*C1*/
3482                     ((map->m_lblk + map_len) == next_lblk) &&           /*C2*/
3483                     ((ee_pblk + ee_len) == next_pblk) &&                /*C3*/
3484                     (next_len < (EXT_INIT_MAX_LEN - map_len))) {        /*C4*/
3485                         err = ext4_ext_get_access(handle, inode, path + depth);
3486                         if (err)
3487                                 goto out;
3488
3489                         trace_ext4_ext_convert_to_initialized_fastpath(inode,
3490                                 map, ex, abut_ex);
3491
3492                         /* Shift the start of abut_ex by 'map_len' blocks */
3493                         abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
3494                         ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
3495                         ex->ee_len = cpu_to_le16(ee_len - map_len);
3496                         ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3497
3498                         /* Extend abut_ex by 'map_len' blocks */
3499                         abut_ex->ee_len = cpu_to_le16(next_len + map_len);
3500
3501                         /* Result: number of initialized blocks past m_lblk */
3502                         allocated = map_len;
3503                 }
3504         }
3505         if (allocated) {
3506                 /* Mark the block containing both extents as dirty */
3507                 ext4_ext_dirty(handle, inode, path + depth);
3508
3509                 /* Update path to point to the right extent */
3510                 path[depth].p_ext = abut_ex;
3511                 goto out;
3512         } else
3513                 allocated = ee_len - (map->m_lblk - ee_block);
3514
3515         WARN_ON(map->m_lblk < ee_block);
3516         /*
3517          * It is safe to convert extent to initialized via explicit
3518          * zeroout only if extent is fully inside i_size or new_size.
3519          */
3520         split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3521
3522         if (EXT4_EXT_MAY_ZEROOUT & split_flag)
3523                 max_zeroout = sbi->s_extent_max_zeroout_kb >>
3524                         (inode->i_sb->s_blocksize_bits - 10);
3525
3526         /* If extent is less than s_extent_max_zeroout_kb, zeroout directly */
3527         if (max_zeroout && (ee_len <= max_zeroout)) {
3528                 err = ext4_ext_zeroout(inode, ex);
3529                 if (err)
3530                         goto out;
3531                 zero_ex.ee_block = ex->ee_block;
3532                 zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
3533                 ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
3534
3535                 err = ext4_ext_get_access(handle, inode, path + depth);
3536                 if (err)
3537                         goto out;
3538                 ext4_ext_mark_initialized(ex);
3539                 ext4_ext_try_to_merge(handle, inode, path, ex);
3540                 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3541                 goto out;
3542         }
3543
3544         /*
3545          * four cases:
3546          * 1. split the extent into three extents.
3547          * 2. split the extent into two extents, zeroout the first half.
3548          * 3. split the extent into two extents, zeroout the second half.
3549          * 4. split the extent into two extents without zeroout.
3550          */
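        /*
         * Illustrative numbers: for an extent at ee_block = 0 with
         * ee_len = 100 and map = [30, 40), case 3 zeroes out and
         * initializes the whole tail [30, 100), while case 2 zeroes
         * out the head [0, 30) and initializes [0, 40).
         */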
3551         split_map.m_lblk = map->m_lblk;
3552         split_map.m_len = map->m_len;
3553
3554         if (max_zeroout && (allocated > map->m_len)) {
3555                 if (allocated <= max_zeroout) {
3556                         /* case 3 */
3557                         zero_ex.ee_block =
3558                                          cpu_to_le32(map->m_lblk);
3559                         zero_ex.ee_len = cpu_to_le16(allocated);
3560                         ext4_ext_store_pblock(&zero_ex,
3561                                 ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3562                         err = ext4_ext_zeroout(inode, &zero_ex);
3563                         if (err)
3564                                 goto out;
3565                         split_map.m_lblk = map->m_lblk;
3566                         split_map.m_len = allocated;
3567                 } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
3568                         /* case 2 */
3569                         if (map->m_lblk != ee_block) {
3570                                 zero_ex.ee_block = ex->ee_block;
3571                                 zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3572                                                         ee_block);
3573                                 ext4_ext_store_pblock(&zero_ex,
3574                                                       ext4_ext_pblock(ex));
3575                                 err = ext4_ext_zeroout(inode, &zero_ex);
3576                                 if (err)
3577                                         goto out;
3578                         }
3579
3580                         split_map.m_lblk = ee_block;
3581                         split_map.m_len = map->m_lblk - ee_block + map->m_len;
3582                         allocated = map->m_len;
3583                 }
3584         }
3585
3586         allocated = ext4_split_extent(handle, inode, path,
3587                                       &split_map, split_flag, flags);
3588         if (allocated < 0)
3589                 err = allocated;
3590
3591 out:
3592         /* If we have gotten a failure, don't zero out status tree */
3593         if (!err)
3594                 err = ext4_zeroout_es(inode, &zero_ex);
3595         return err ? err : allocated;
3596 }
3597
3598 /*
3599  * This function is called by ext4_ext_map_blocks() from
3600  * ext4_get_blocks_dio_write() when DIO is used to write
3601  * to an uninitialized extent.
3602  *
3603  * Writing to an uninitialized extent may result in splitting the uninitialized
3604  * extent into multiple initialized/uninitialized extents (up to three).
3605  * There are three possibilities:
3606  *   a> There is no split required: Entire extent should be uninitialized
3607  *   b> Splits in two extents: Write is happening at either end of the extent
3608  *   c> Splits in three extents: Someone is writing in the middle of the extent
3609  *
3610  * This works the same way in the case of initialized -> unwritten conversion.
3611  *
3612  * One or more index blocks may be needed if the extent tree grows after
3613  * the uninitialized extent is split. To prevent ENOSPC from occurring at
3614  * IO completion, we need to split the uninitialized extent before the DIO
3615  * code submits the IO. The uninitialized extent will be split into three
3616  * uninitialized extents (at most). After IO completes, the part that was
3617  * filled will be converted to initialized by the end_io callback function
3618  * via ext4_convert_unwritten_extents().
3619  *
3620  * Returns the size of the uninitialized extent to be written on success.
3621  */
3622 static int ext4_split_convert_extents(handle_t *handle,
3623                                         struct inode *inode,
3624                                         struct ext4_map_blocks *map,
3625                                         struct ext4_ext_path *path,
3626                                         int flags)
3627 {
3628         ext4_lblk_t eof_block;
3629         ext4_lblk_t ee_block;
3630         struct ext4_extent *ex;
3631         unsigned int ee_len;
3632         int split_flag = 0, depth;
3633
3634         ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
3635                   __func__, inode->i_ino,
3636                   (unsigned long long)map->m_lblk, map->m_len);
3637
3638         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3639                 inode->i_sb->s_blocksize_bits;
3640         if (eof_block < map->m_lblk + map->m_len)
3641                 eof_block = map->m_lblk + map->m_len;
3642         /*
3643          * It is safe to convert the extent to initialized via explicit
3644          * zeroout only if the extent is fully inside i_size or new_size.
3645          */
3646         depth = ext_depth(inode);
3647         ex = path[depth].p_ext;
3648         ee_block = le32_to_cpu(ex->ee_block);
3649         ee_len = ext4_ext_get_actual_len(ex);
3650
3651         /* Convert to unwritten */
3652         if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
3653                 split_flag |= EXT4_EXT_DATA_VALID1;
3654         /* Convert to initialized */
3655         } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
3656                 split_flag |= ee_block + ee_len <= eof_block ?
3657                               EXT4_EXT_MAY_ZEROOUT : 0;
3658                 split_flag |= (EXT4_EXT_MARK_UNINIT2 | EXT4_EXT_DATA_VALID2);
3659         }
3660         flags |= EXT4_GET_BLOCKS_PRE_IO;
3661         return ext4_split_extent(handle, inode, path, map, split_flag, flags);
3662 }
3663
3664 static int ext4_convert_initialized_extents(handle_t *handle,
3665                                             struct inode *inode,
3666                                             struct ext4_map_blocks *map,
3667                                             struct ext4_ext_path *path)
3668 {
3669         struct ext4_extent *ex;
3670         ext4_lblk_t ee_block;
3671         unsigned int ee_len;
3672         int depth;
3673         int err = 0;
3674
3675         depth = ext_depth(inode);
3676         ex = path[depth].p_ext;
3677         ee_block = le32_to_cpu(ex->ee_block);
3678         ee_len = ext4_ext_get_actual_len(ex);
3679
3680         ext_debug("%s: inode %lu, logical "
3681                 "block %llu, max_blocks %u\n", __func__, inode->i_ino,
3682                   (unsigned long long)ee_block, ee_len);
3683
3684         if (ee_block != map->m_lblk || ee_len > map->m_len) {
3685                 err = ext4_split_convert_extents(handle, inode, map, path,
3686                                 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
3687                 if (err < 0)
3688                         goto out;
3689                 ext4_ext_drop_refs(path);
3690                 path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
3691                 if (IS_ERR(path)) {
3692                         err = PTR_ERR(path);
3693                         goto out;
3694                 }
3695                 depth = ext_depth(inode);
3696                 ex = path[depth].p_ext;
3697         }
3698
3699         err = ext4_ext_get_access(handle, inode, path + depth);
3700         if (err)
3701                 goto out;
3702         /* first mark the extent as uninitialized */
3703         ext4_ext_mark_uninitialized(ex);
3704
3705         /* note: ext4_ext_correct_indexes() isn't needed here because
3706          * borders are not changed
3707          */
3708         ext4_ext_try_to_merge(handle, inode, path, ex);
3709
3710         /* Mark modified extent as dirty */
3711         err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3712 out:
3713         ext4_ext_show_leaf(inode, path);
3714         return err;
3715 }
3716
3717
3718 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3719                                                 struct inode *inode,
3720                                                 struct ext4_map_blocks *map,
3721                                                 struct ext4_ext_path *path)
3722 {
3723         struct ext4_extent *ex;
3724         ext4_lblk_t ee_block;
3725         unsigned int ee_len;
3726         int depth;
3727         int err = 0;
3728
3729         depth = ext_depth(inode);
3730         ex = path[depth].p_ext;
3731         ee_block = le32_to_cpu(ex->ee_block);
3732         ee_len = ext4_ext_get_actual_len(ex);
3733
3734         ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
3735                 "block %llu, max_blocks %u\n", inode->i_ino,
3736                   (unsigned long long)ee_block, ee_len);
3737
3738         /* If the extent is larger than requested, it is a clear sign that we
3739          * still have some extent state machine issues left, so a split is
3740          * still required.
3741          * TODO: once all related issues are fixed, this situation should be
3742          * treated as an error.
3743          */
3744         if (ee_block != map->m_lblk || ee_len > map->m_len) {
3745 #ifdef EXT4_DEBUG
3746                 ext4_warning("Inode (%ld) finished: extent logical block %llu,"
3747                              " len %u; IO logical block %llu, len %u\n",
3748                              inode->i_ino, (unsigned long long)ee_block, ee_len,
3749                              (unsigned long long)map->m_lblk, map->m_len);
3750 #endif
3751                 err = ext4_split_convert_extents(handle, inode, map, path,
3752                                                  EXT4_GET_BLOCKS_CONVERT);
3753                 if (err < 0)
3754                         goto out;
3755                 ext4_ext_drop_refs(path);
3756                 path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
3757                 if (IS_ERR(path)) {
3758                         err = PTR_ERR(path);
3759                         goto out;
3760                 }
3761                 depth = ext_depth(inode);
3762                 ex = path[depth].p_ext;
3763         }
3764
3765         err = ext4_ext_get_access(handle, inode, path + depth);
3766         if (err)
3767                 goto out;
3768         /* first mark the extent as initialized */
3769         ext4_ext_mark_initialized(ex);
3770
3771         /* note: ext4_ext_correct_indexes() isn't needed here because
3772          * borders are not changed
3773          */
3774         ext4_ext_try_to_merge(handle, inode, path, ex);
3775
3776         /* Mark modified extent as dirty */
3777         err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3778 out:
3779         ext4_ext_show_leaf(inode, path);
3780         return err;
3781 }
3782
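/*
 * When blocks that previously held metadata are reallocated as data,
 * stale buffer_heads in the block device mapping may still reference
 * them; unmap_underlying_metadata() evicts those entries so the old
 * contents cannot alias the new data blocks.
 */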
3783 static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3784                         sector_t block, int count)
3785 {
3786         int i;
3787         for (i = 0; i < count; i++)
3788                 unmap_underlying_metadata(bdev, block + i);
3789 }
3790
3791 /*
3792  * Handle EOFBLOCKS_FL flag, clearing it if necessary
3793  */
3794 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3795                               ext4_lblk_t lblk,
3796                               struct ext4_ext_path *path,
3797                               unsigned int len)
3798 {
3799         int i, depth;
3800         struct ext4_extent_header *eh;
3801         struct ext4_extent *last_ex;
3802
3803         if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
3804                 return 0;
3805
3806         depth = ext_depth(inode);
3807         eh = path[depth].p_hdr;
3808
3809         /*
3810          * We're going to remove EOFBLOCKS_FL entirely in future so we
3811          * do not care for this case anymore. Simply remove the flag
3812          * if there are no extents.
3813          */
3814         if (unlikely(!eh->eh_entries))
3815                 goto out;
3816         last_ex = EXT_LAST_EXTENT(eh);
3817         /*
3818          * We should clear the EOFBLOCKS_FL flag if we are writing the
3819          * last block in the last extent in the file.  We test this by
3820          * first checking to see if the caller to
3821          * ext4_ext_get_blocks() was interested in the last block (or
3822          * a block beyond the last block) in the current extent.  If
3823          * this turns out to be false, we can bail out from this
3824          * function immediately.
3825          */
3826         if (lblk + len < le32_to_cpu(last_ex->ee_block) +
3827             ext4_ext_get_actual_len(last_ex))
3828                 return 0;
3829         /*
3830          * If the caller does appear to be planning to write at or
3831          * beyond the end of the current extent, we then test to see
3832          * if the current extent is the last extent in the file, by
3833          * checking to make sure it was reached via the rightmost node
3834          * at each level of the tree.
3835          */
3836         for (i = depth-1; i >= 0; i--)
3837                 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3838                         return 0;
3839 out:
3840         ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3841         return ext4_mark_inode_dirty(handle, inode);
3842 }
3843
3844 /**
3845  * ext4_find_delalloc_range: find delayed allocated block in the given range.
3846  *
3847  * Return 1 if there is a delalloc block in the range, otherwise 0.
3848  */
3849 int ext4_find_delalloc_range(struct inode *inode,
3850                              ext4_lblk_t lblk_start,
3851                              ext4_lblk_t lblk_end)
3852 {
3853         struct extent_status es;
3854
3855         ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
3856         if (es.es_len == 0)
3857                 return 0; /* there is no delayed extent in this tree */
3858         else if (es.es_lblk <= lblk_start &&
3859                  lblk_start < es.es_lblk + es.es_len)
3860                 return 1;
3861         else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
3862                 return 1;
3863         else
3864                 return 0;
3865 }
3866
3867 int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
3868 {
3869         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3870         ext4_lblk_t lblk_start, lblk_end;
3871         lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
3872         lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
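        /*
         * Example with illustrative numbers: with a cluster ratio of
         * 16, lblk 35 expands to the cluster-aligned range [32, 47].
         */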
3873
3874         return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
3875 }
3876
3877 /**
3878  * Determines how many complete clusters (out of those specified by the 'map')
3879  * are under delalloc and have had quota reserved for them.
3880  * This function is called when we are writing out the blocks that were
3881  * originally written with their allocation delayed, but then the space was
3882  * allocated using fallocate() before the delayed allocation could be resolved.
3883  * The cases to look for are:
3884  * ('=' indicates delayed allocated blocks
3885  *  '-' indicates non-delayed allocated blocks)
3886  * (a) partial clusters towards beginning and/or end outside of allocated range
3887  *     are not delalloc'ed.
3888  *      Ex:
3889  *      |----c---=|====c====|====c====|===-c----|
3890  *               |++++++ allocated ++++++|
3891  *      ==> 4 complete clusters in the above example
3892  *
3893  * (b) partial cluster (outside of allocated range) towards either end is
3894  *     marked for delayed allocation. In this case, we will exclude that
3895  *     cluster.
3896  *      Ex:
3897  *      |----====c========|========c========|
3898  *           |++++++ allocated ++++++|
3899  *      ==> 1 complete cluster in the above example
3900  *
3901  *      Ex:
3902  *      |================c================|
3903  *            |++++++ allocated ++++++|
3904  *      ==> 0 complete clusters in the above example
3905  *
3906  * ext4_da_update_reserve_space() will be called only if we
3907  * determine here that there were some "entire" clusters that span
3908  * this 'allocated' range.
3909  * In the non-bigalloc case, this function will just end up returning num_blks
3910  * without ever calling ext4_find_delalloc_range.
3911  */
3912 static unsigned int
3913 get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3914                            unsigned int num_blks)
3915 {
3916         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3917         ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
3918         ext4_lblk_t lblk_from, lblk_to, c_offset;
3919         unsigned int allocated_clusters = 0;
3920
3921         alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
3922         alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
3923
3924         /* max possible clusters for this allocation */
3925         allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
3926
3927         trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3928
3929         /* Check towards left side */
3930         c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
3931         if (c_offset) {
3932                 lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
3933                 lblk_to = lblk_from + c_offset - 1;
3934
3935                 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
3936                         allocated_clusters--;
3937         }
3938
3939         /* Now check towards right. */
3940         c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
3941         if (allocated_clusters && c_offset) {
3942                 lblk_from = lblk_start + num_blks;
3943                 lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
3944
3945                 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
3946                         allocated_clusters--;
3947         }
3948
3949         return allocated_clusters;
3950 }
3951
3952 static int
3953 ext4_ext_convert_initialized_extent(handle_t *handle, struct inode *inode,
3954                         struct ext4_map_blocks *map,
3955                         struct ext4_ext_path *path, int flags,
3956                         unsigned int allocated, ext4_fsblk_t newblock)
3957 {
3958         int ret = 0;
3959         int err = 0;
3960
3961         /*
3962          * Make sure that the extent is no bigger than we support with
3963          * uninitialized extents.
3964          */
3965         if (map->m_len > EXT_UNINIT_MAX_LEN)
3966                 map->m_len = EXT_UNINIT_MAX_LEN / 2;
3967
3968         ret = ext4_convert_initialized_extents(handle, inode, map,
3969                                                 path);
3970         if (ret >= 0) {
3971                 ext4_update_inode_fsync_trans(handle, inode, 1);
3972                 err = check_eofblocks_fl(handle, inode, map->m_lblk,
3973                                          path, map->m_len);
3974         } else
3975                 err = ret;
3976         map->m_flags |= EXT4_MAP_UNWRITTEN;
3977         if (allocated > map->m_len)
3978                 allocated = map->m_len;
3979         map->m_len = allocated;
3980
3981         return err ? err : allocated;
3982 }
3983
3984 static int
3985 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3986                         struct ext4_map_blocks *map,
3987                         struct ext4_ext_path *path, int flags,
3988                         unsigned int allocated, ext4_fsblk_t newblock)
3989 {
3990         int ret = 0;
3991         int err = 0;
3992         ext4_io_end_t *io = ext4_inode_aio(inode);
3993
3994         ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
3995                   "block %llu, max_blocks %u, flags %x, allocated %u\n",
3996                   inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3997                   flags, allocated);
3998         ext4_ext_show_leaf(inode, path);
3999
4000         /*
4001          * When writing into uninitialized space, we should not fail to
4002          * allocate metadata blocks for the new extent block if needed.
4003          */
4004         flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
4005
4006         trace_ext4_ext_handle_uninitialized_extents(inode, map, flags,
4007                                                     allocated, newblock);
4008
4009         /* get_block() before submitting the IO: split the extent */
4010         if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
4011                 ret = ext4_split_convert_extents(handle, inode, map,
4012                                          path, flags | EXT4_GET_BLOCKS_CONVERT);
4013                 if (ret <= 0)
4014                         goto out;
4015                 /*
4016                  * Flag the inode (non-AIO case) or the end_io struct (AIO
4017                  * case) to indicate that this IO needs conversion to written
4018                  * when the IO is completed
4019                  */
4020                 if (io)
4021                         ext4_set_io_unwritten_flag(inode, io);
4022                 else
4023                         ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
4024                 map->m_flags |= EXT4_MAP_UNWRITTEN;
4025                 if (ext4_should_dioread_nolock(inode))
4026                         map->m_flags |= EXT4_MAP_UNINIT;
4027                 goto out;
4028         }
4029         /* IO end_io completed: convert the filled extent to written */
4030         if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
4031                 ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
4032                                                         path);
4033                 if (ret >= 0) {
4034                         ext4_update_inode_fsync_trans(handle, inode, 1);
4035                         err = check_eofblocks_fl(handle, inode, map->m_lblk,
4036                                                  path, map->m_len);
4037                 } else
4038                         err = ret;
4039                 map->m_flags |= EXT4_MAP_MAPPED;
4040                 map->m_pblk = newblock;
4041                 if (allocated > map->m_len)
4042                         allocated = map->m_len;
4043                 map->m_len = allocated;
4044                 goto out2;
4045         }
4046         /* buffered IO case */
4047         /*
4048          * a repeated fallocate creation request:
4049          * we already have an unwritten extent
4050          */
4051         if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
4052                 map->m_flags |= EXT4_MAP_UNWRITTEN;
4053                 goto map_out;
4054         }
4055
4056         /* buffered READ or buffered write_begin() lookup */
4057         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4058                 /*
4059                  * We have blocks reserved already.  We
4060                  * return allocated blocks so that delalloc
4061                  * won't do block reservation for us.  But
4062                  * the buffer head will be unmapped so that
4063                  * a read from the block returns 0s.
4064                  */
4065                 map->m_flags |= EXT4_MAP_UNWRITTEN;
4066                 goto out1;
4067         }
4068
4069         /* buffered write, writepage time: convert */
4070         ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags);
4071         if (ret >= 0)
4072                 ext4_update_inode_fsync_trans(handle, inode, 1);
4073 out:
4074         if (ret <= 0) {
4075                 err = ret;
4076                 goto out2;
4077         } else
4078                 allocated = ret;
4079         map->m_flags |= EXT4_MAP_NEW;
4080         /*
4081          * if we allocated more blocks than requested,
4082          * we need to make sure we unmap the extra blocks
4083          * allocated. The blocks actually needed will get
4084          * unmapped later, when we find the buffer_head marked
4085          * new.
4086          */
4087         if (allocated > map->m_len) {
4088                 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
4089                                         newblock + map->m_len,
4090                                         allocated - map->m_len);
4091                 allocated = map->m_len;
4092         }
4093         map->m_len = allocated;
4094
4095         /*
4096          * If we have done fallocate at an offset that is already
4097          * delayed allocated, we would have block and quota
4098          * reservations done in the delayed write path.
4099          * But fallocate will already have updated the quota and block
4100          * count for this offset, so cancel these reservations.
4101          */
4102         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4103                 unsigned int reserved_clusters;
4104                 reserved_clusters = get_reserved_cluster_alloc(inode,
4105                                 map->m_lblk, map->m_len);
4106                 if (reserved_clusters)
4107                         ext4_da_update_reserve_space(inode,
4108                                                      reserved_clusters,
4109                                                      0);
4110         }
4111
4112 map_out:
4113         map->m_flags |= EXT4_MAP_MAPPED;
4114         if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
4115                 err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
4116                                          map->m_len);
4117                 if (err < 0)
4118                         goto out2;
4119         }
4120 out1:
4121         if (allocated > map->m_len)
4122                 allocated = map->m_len;
4123         ext4_ext_show_leaf(inode, path);
4124         map->m_pblk = newblock;
4125         map->m_len = allocated;
4126 out2:
4127         return err ? err : allocated;
4128 }
4129
4130 /*
4131  * get_implied_cluster_alloc - check to see if the requested
4132  * allocation (in the map structure) overlaps with a cluster already
4133  * allocated in an extent.
4134  *      @sb     The filesystem superblock structure
4135  *      @map    The requested lblk->pblk mapping
4136  *      @ex     The extent structure which might contain an implied
4137  *                      cluster allocation
4138  *
4139  * This function is called by ext4_ext_map_blocks() after we failed to
4140  * find blocks that were already in the inode's extent tree.  Hence,
4141  * we know that the beginning of the requested region cannot overlap
4142  * the extent from the inode's extent tree.  There are three cases we
4143  * want to catch.  The first is this case:
4144  *
4145  *               |--- cluster # N--|
4146  *    |--- extent ---|  |---- requested region ---|
4147  *                      |==========|
4148  *
4149  * The second case that we need to test for is this one:
4150  *
4151  *   |--------- cluster # N ----------------|
4152  *         |--- requested region --|   |------- extent ----|
4153  *         |=======================|
4154  *
4155  * The third case is when the requested region lies between two extents
4156  * within the same cluster:
4157  *          |------------- cluster # N-------------|
4158  * |----- ex -----|                  |---- ex_right ----|
4159  *                  |------ requested region ------|
4160  *                  |================|
4161  *
4162  * In each of the above cases, we need to set the map->m_pblk and
4163  * map->m_len so they correspond to the extent labelled as
4164  * "|====|" from cluster #N, since it is already in use for data in
4165  * cluster EXT4_B2C(sbi, map->m_lblk).  We will then return 1 to
4166  * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
4167  * as a new "allocated" block region.  Otherwise, we will return 0 and
4168  * ext4_ext_map_blocks() will then allocate one or more new clusters
4169  * by calling ext4_mb_new_blocks().
4170  */
4171 static int get_implied_cluster_alloc(struct super_block *sb,
4172                                      struct ext4_map_blocks *map,
4173                                      struct ext4_extent *ex,
4174                                      struct ext4_ext_path *path)
4175 {
4176         struct ext4_sb_info *sbi = EXT4_SB(sb);
4177         ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4178         ext4_lblk_t ex_cluster_start, ex_cluster_end;
4179         ext4_lblk_t rr_cluster_start;
4180         ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4181         ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4182         unsigned short ee_len = ext4_ext_get_actual_len(ex);
4183
4184         /* The extent passed in that we are trying to match */
4185         ex_cluster_start = EXT4_B2C(sbi, ee_block);
4186         ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
4187
4188         /* The requested region passed into ext4_map_blocks() */
4189         rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
4190
4191         if ((rr_cluster_start == ex_cluster_end) ||
4192             (rr_cluster_start == ex_cluster_start)) {
4193                 if (rr_cluster_start == ex_cluster_end)
4194                         ee_start += ee_len - 1;
4195                 map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
4196                 map->m_len = min(map->m_len,
4197                                  (unsigned) sbi->s_cluster_ratio - c_offset);
4198                 /*
4199                  * Check for and handle this case:
4200                  *
4201                  *   |--------- cluster # N-------------|
4202                  *                     |------- extent ----|
4203                  *         |--- requested region ---|
4204                  *         |===========|
4205                  */
4206
4207                 if (map->m_lblk < ee_block)
4208                         map->m_len = min(map->m_len, ee_block - map->m_lblk);
4209
4210                 /*
4211                  * Check for the case where there is already another allocated
4212                  * block to the right of 'ex' but before the end of the cluster.
4213                  *
4214                  *          |------------- cluster # N-------------|
4215                  * |----- ex -----|                  |---- ex_right ----|
4216                  *                  |------ requested region ------|
4217                  *                  |================|
4218                  */
4219                 if (map->m_lblk > ee_block) {
4220                         ext4_lblk_t next = ext4_ext_next_allocated_block(path);
4221                         map->m_len = min(map->m_len, next - map->m_lblk);
4222                 }
4223
4224                 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
4225                 return 1;
4226         }
4227
4228         trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
4229         return 0;
4230 }
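
/*
 * Worked example (hypothetical numbers): with s_cluster_ratio == 4, suppose
 * 'ex' maps logical blocks [0-5] to physical blocks [800-805] and the
 * request starts at m_lblk == 7.  Then rr_cluster_start == EXT4_B2C(sbi, 7)
 * == 1 == ex_cluster_end, so the request shares cluster 1 with the extent.
 * With c_offset == 3, ee_start is advanced to 805 (the last block of 'ex'),
 * m_pblk == EXT4_PBLK_CMASK(sbi, 805) + 3 == 804 + 3 == 807, and m_len is
 * clamped to 4 - 3 == 1 block (and possibly further, against the next
 * allocated block to the right); the function returns 1.
 */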
4231
4232
4233 /*
4234  * Block allocation/map/preallocation routine for extents based files
4235  *
4236  *
4237  * Needs to be called with
4238  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating a file system block
4239  * (i.e., create is zero); otherwise with down_write(&EXT4_I(inode)->i_data_sem)
4240  *
4241  * return > 0, number of blocks already mapped/allocated;
4242  *          if create == 0 and these are pre-allocated blocks,
4243  *              the buffer head is unmapped;
4244  *          otherwise blocks are mapped
4245  *
4246  * return = 0, if plain look up failed (blocks have not been allocated)
4247  *          buffer head is unmapped
4248  *
4249  * return < 0, error case.
4250  */
4251 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4252                         struct ext4_map_blocks *map, int flags)
4253 {
4254         struct ext4_ext_path *path = NULL;
4255         struct ext4_extent newex, *ex, *ex2;
4256         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4257         ext4_fsblk_t newblock = 0;
4258         int free_on_err = 0, err = 0, depth, ret;
4259         unsigned int allocated = 0, offset = 0;
4260         unsigned int allocated_clusters = 0;
4261         struct ext4_allocation_request ar;
4262         ext4_io_end_t *io = ext4_inode_aio(inode);
4263         ext4_lblk_t cluster_offset;
4264         int set_unwritten = 0;
4265
4266         ext_debug("blocks %u/%u requested for inode %lu\n",
4267                   map->m_lblk, map->m_len, inode->i_ino);
4268         trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4269
4270         /* find extent for this block */
4271         path = ext4_ext_find_extent(inode, map->m_lblk, NULL, 0);
4272         if (IS_ERR(path)) {
4273                 err = PTR_ERR(path);
4274                 path = NULL;
4275                 goto out2;
4276         }
4277
4278         depth = ext_depth(inode);
4279
4280         /*
4281          * a consistent leaf must not be empty;
4282          * this situation is possible, though, _during_ tree modification;
4283          * this is why the assert can't be placed in ext4_ext_find_extent()
4284          */
4285         if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4286                 EXT4_ERROR_INODE(inode, "bad extent address "
4287                                  "lblock: %lu, depth: %d pblock %lld",
4288                                  (unsigned long) map->m_lblk, depth,
4289                                  path[depth].p_block);
4290                 err = -EIO;
4291                 goto out2;
4292         }
4293
4294         ex = path[depth].p_ext;
4295         if (ex) {
4296                 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4297                 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4298                 unsigned short ee_len;
4299
4300
4301                 /*
4302                  * Uninitialized extents are treated as holes, except that
4303                  * we split out initialized portions during a write.
4304                  */
4305                 ee_len = ext4_ext_get_actual_len(ex);
4306
4307                 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
4308
4309                 /* if found extent covers block, simply return it */
4310                 if (in_range(map->m_lblk, ee_block, ee_len)) {
4311                         newblock = map->m_lblk - ee_block + ee_start;
4312                         /* number of remaining blocks in the extent */
4313                         allocated = ee_len - (map->m_lblk - ee_block);
4314                         ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
4315                                   ee_block, ee_len, newblock);
4316
4317                         /*
4318                          * If the extent is initialized, check whether the
4319                          * caller wants to convert it to unwritten.
4320                          */
4321                         if ((!ext4_ext_is_uninitialized(ex)) &&
4322                             (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
4323                                 allocated = ext4_ext_convert_initialized_extent(
4324                                                 handle, inode, map, path, flags,
4325                                                 allocated, newblock);
4326                                 goto out2;
4327                         } else if (!ext4_ext_is_uninitialized(ex))
4328                                 goto out;
4329
4330                         ret = ext4_ext_handle_uninitialized_extents(
4331                                 handle, inode, map, path, flags,
4332                                 allocated, newblock);
4333                         if (ret < 0)
4334                                 err = ret;
4335                         else
4336                                 allocated = ret;
4337                         goto out2;
4338                 }
4339         }
4340
4341         if ((sbi->s_cluster_ratio > 1) &&
4342             ext4_find_delalloc_cluster(inode, map->m_lblk))
4343                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4344
4345         /*
4346          * the requested block isn't allocated yet;
4347          * we can't create blocks if the create flag is zero
4348          */
4349         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4350                 /*
4351                  * put the just-found gap into the cache to speed up
4352                  * subsequent requests
4353                  */
4354                 if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0)
4355                         ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
4356                 goto out2;
4357         }
4358
4359         /*
4360          * Okay, we need to do block allocation.
4361          */
4362         map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
4363         newex.ee_block = cpu_to_le32(map->m_lblk);
4364         cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4365
4366         /*
4367          * If we are doing bigalloc, check to see if the extent returned
4368          * by ext4_ext_find_extent() implies a cluster we can use.
4369          */
4370         if (cluster_offset && ex &&
4371             get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4372                 ar.len = allocated = map->m_len;
4373                 newblock = map->m_pblk;
4374                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4375                 goto got_allocated_blocks;
4376         }
4377
4378         /* find neighbour allocated blocks */
4379         ar.lleft = map->m_lblk;
4380         err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4381         if (err)
4382                 goto out2;
4383         ar.lright = map->m_lblk;
4384         ex2 = NULL;
4385         err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4386         if (err)
4387                 goto out2;
4388
4389         /* Check if the extent after searching to the right implies a
4390          * cluster we can use. */
4391         if ((sbi->s_cluster_ratio > 1) && ex2 &&
4392             get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
4393                 ar.len = allocated = map->m_len;
4394                 newblock = map->m_pblk;
4395                 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4396                 goto got_allocated_blocks;
4397         }
4398
4399         /*
4400          * See if request is beyond maximum number of blocks we can have in
4401          * a single extent. For an initialized extent this limit is
4402          * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
4403          * EXT_UNINIT_MAX_LEN.
4404          */
4405         if (map->m_len > EXT_INIT_MAX_LEN &&
4406             !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4407                 map->m_len = EXT_INIT_MAX_LEN;
4408         else if (map->m_len > EXT_UNINIT_MAX_LEN &&
4409                  (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4410                 map->m_len = EXT_UNINIT_MAX_LEN;
4411
4412         /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4413         newex.ee_len = cpu_to_le16(map->m_len);
4414         err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4415         if (err)
4416                 allocated = ext4_ext_get_actual_len(&newex);
4417         else
4418                 allocated = map->m_len;
4419
4420         /* allocate new block */
4421         ar.inode = inode;
4422         ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4423         ar.logical = map->m_lblk;
4424         /*
4425          * We calculate the offset from the beginning of the cluster
4426          * for the logical block number, since when we allocate a
4427          * physical cluster, the physical block should start at the
4428          * same offset from the beginning of the cluster.  This is
4429          * needed so that future calls to get_implied_cluster_alloc()
4430          * work correctly.
4431          */
4432         offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4433         ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4434         ar.goal -= offset;
4435         ar.logical -= offset;
4436         if (S_ISREG(inode->i_mode))
4437                 ar.flags = EXT4_MB_HINT_DATA;
4438         else
4439                 /* disable in-core preallocation for non-regular files */
4440                 ar.flags = 0;
4441         if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4442                 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4443         newblock = ext4_mb_new_blocks(handle, &ar, &err);
4444         if (!newblock)
4445                 goto out2;
4446         ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4447                   ar.goal, newblock, allocated);
4448         free_on_err = 1;
4449         allocated_clusters = ar.len;
4450         ar.len = EXT4_C2B(sbi, ar.len) - offset;
4451         if (ar.len > allocated)
4452                 ar.len = allocated;
4453
4454 got_allocated_blocks:
4455         /* try to insert new extent into found leaf and return */
4456         ext4_ext_store_pblock(&newex, newblock + offset);
4457         newex.ee_len = cpu_to_le16(ar.len);
4458         /* Mark uninitialized */
4459         if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
4460                 ext4_ext_mark_uninitialized(&newex);
4461                 map->m_flags |= EXT4_MAP_UNWRITTEN;
4462                 /*
4463                  * An io_end structure is created for every IO write to an
4464                  * uninitialized extent. To avoid unnecessary conversion,
4465                  * here we flag only the IO that really needs the conversion.
4466                  * For the non-async direct IO case, flag the inode state
4467                  * so that we perform the conversion when the IO is done.
4468                  */
4469                 if ((flags & EXT4_GET_BLOCKS_PRE_IO))
4470                         set_unwritten = 1;
4471                 if (ext4_should_dioread_nolock(inode))
4472                         map->m_flags |= EXT4_MAP_UNINIT;
4473         }
4474
4475         err = 0;
4476         if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4477                 err = check_eofblocks_fl(handle, inode, map->m_lblk,
4478                                          path, ar.len);
4479         if (!err)
4480                 err = ext4_ext_insert_extent(handle, inode, path,
4481                                              &newex, flags);
4482
4483         if (!err && set_unwritten) {
4484                 if (io)
4485                         ext4_set_io_unwritten_flag(inode, io);
4486                 else
4487                         ext4_set_inode_state(inode,
4488                                              EXT4_STATE_DIO_UNWRITTEN);
4489         }
4490
4491         if (err && free_on_err) {
4492                 int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
4493                         EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4494                 /* free data blocks we just allocated */
4495                 /* not a good idea to call discard here directly,
4496                  * but otherwise we'd need to call it on every free() */
4497                 ext4_discard_preallocations(inode);
4498                 ext4_free_blocks(handle, inode, NULL, newblock,
4499                                  EXT4_C2B(sbi, allocated_clusters), fb_flags);
4500                 goto out2;
4501         }
4502
4503         /* the previous routine could have used the block we allocated */
4504         newblock = ext4_ext_pblock(&newex);
4505         allocated = ext4_ext_get_actual_len(&newex);
4506         if (allocated > map->m_len)
4507                 allocated = map->m_len;
4508         map->m_flags |= EXT4_MAP_NEW;
4509
4510         /*
4511          * Update reserved blocks/metadata blocks after successful
4512          * block allocation which had been deferred till now.
4513          */
4514         if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4515                 unsigned int reserved_clusters;
4516                 /*
4517                  * Check how many clusters we had reserved for this allocated range
4518                  */
4519                 reserved_clusters = get_reserved_cluster_alloc(inode,
4520                                                 map->m_lblk, allocated);
4521                 if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
4522                         if (reserved_clusters) {
4523                                 /*
4524                                  * We have clusters reserved for this range.
4525                                  * But since we are not doing actual allocation
4526                                  * and are simply using blocks from previously
4527                                  * allocated cluster, we should release the
4528                                  * reservation and not claim quota.
4529                                  */
4530                                 ext4_da_update_reserve_space(inode,
4531                                                 reserved_clusters, 0);
4532                         }
4533                 } else {
4534                         BUG_ON(allocated_clusters < reserved_clusters);
4535                         if (reserved_clusters < allocated_clusters) {
4536                                 struct ext4_inode_info *ei = EXT4_I(inode);
4537                                 int reservation = allocated_clusters -
4538                                                   reserved_clusters;
4539                                 /*
4540                                  * It seems we claimed some clusters outside of
4541                                  * the range of this allocation. We should give
4542                                  * them back to the reservation pool. This can
4543                                  * happen in the following case:
4544                                  *
4545                                  * * Suppose s_cluster_ratio is 4 (i.e., each
4546                                  *   cluster has 4 blocks); thus the clusters
4547                                  *   are [0-3],[4-7],[8-11]...
4548                                  * * First comes delayed allocation write for
4549                                  *   logical blocks 10 & 11. Since there were no
4550                                  *   previous delayed allocated blocks in the
4551                                  *   range [8-11], we would reserve 1 cluster
4552                                  *   for this write.
4553                                  * * Next comes write for logical blocks 3 to 8.
4554                                  *   In this case, we will reserve 2 clusters
4555                                  *   (for [0-3] and [4-7]; not for [8-11], as
4556                                  *   that range already has delayed allocated blocks).
4557                                  *   Thus total reserved clusters now becomes 3.
4558                                  * * Now, during the delayed allocation writeout
4559                                  *   time, we will first write blocks [3-8] and
4560                                  *   allocate 3 clusters for writing these
4561                                  *   blocks. Also, we would claim all these
4562                                  *   three clusters above.
4563                                  * * Now when we come here to writeout the
4564                                  *   blocks [10-11], we would expect to claim
4565                                  *   the reservation of 1 cluster we had made
4566                                  *   (and we would claim it since there are no
4567                                  *   more delayed allocated blocks in the range
4568                                  *   [8-11]). But our reserved cluster count had
4569                                  *   already gone to 0.
4570                                  *
4571                                  *   Thus, at the last step above, when we
4572                                  *   determine that there are still some unwritten
4573                                  *   delayed allocated blocks outside of our
4574                                  *   current block range, we should increment the
4575                                  *   reserved clusters count so that when the
4576                                  *   remaining blocks finally get written, we
4577                                  *   can claim them.
4578                                  */
4579                                 dquot_reserve_block(inode,
4580                                                 EXT4_C2B(sbi, reservation));
4581                                 spin_lock(&ei->i_block_reservation_lock);
4582                                 ei->i_reserved_data_blocks += reservation;
4583                                 spin_unlock(&ei->i_block_reservation_lock);
4584                         }
4585                         /*
4586                          * We will claim quota for all newly allocated blocks.
4587                          * We're updating the reserved space *after* the
4588                          * correction above so we do not accidentally free
4589                          * all the metadata reservation because we might
4590                          * actually need it later on.
4591                          */
4592                         ext4_da_update_reserve_space(inode, allocated_clusters,
4593                                                         1);
4594                 }
4595         }
4596
4597         /*
4598          * Cache the extent and update transaction to commit on fdatasync only
4599          * when it is _not_ an uninitialized extent.
4600          */
4601         if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0)
4602                 ext4_update_inode_fsync_trans(handle, inode, 1);
4603         else
4604                 ext4_update_inode_fsync_trans(handle, inode, 0);
4605 out:
4606         if (allocated > map->m_len)
4607                 allocated = map->m_len;
4608         ext4_ext_show_leaf(inode, path);
4609         map->m_flags |= EXT4_MAP_MAPPED;
4610         map->m_pblk = newblock;
4611         map->m_len = allocated;
4612 out2:
4613         if (path) {
4614                 ext4_ext_drop_refs(path);
4615                 kfree(path);
4616         }
4617
4618         trace_ext4_ext_map_blocks_exit(inode, flags, map,
4619                                        err ? err : allocated);
4620         ext4_es_lru_add(inode);
4621         return err ? err : allocated;
4622 }
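
/*
 * A minimal caller sketch (illustrative only; real callers go through
 * ext4_map_blocks(), which dispatches between this and the indirect
 * block path):
 *
 *	struct ext4_map_blocks map = {
 *		.m_lblk = lblk,		// first logical block to map
 *		.m_len	= len,		// number of blocks wanted
 *	};
 *	int ret = ext4_ext_map_blocks(handle, inode, &map, 0);
 *
 *	if (ret < 0)
 *		return ret;		// error
 *	else if (ret == 0)
 *		...;			// hole: nothing mapped
 *	else
 *		...;			// ret blocks mapped at map.m_pblk
 */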
4623
4624 void ext4_ext_truncate(handle_t *handle, struct inode *inode)
4625 {
4626         struct super_block *sb = inode->i_sb;
4627         ext4_lblk_t last_block;
4628         int err = 0;
4629
4630         /*
4631          * TODO: optimization is possible here.
4632          * Probably we need not scan at all,
4633          * because page truncation is enough.
4634          */
4635
4636         /* we have to know where to truncate from in the crash case */
4637         EXT4_I(inode)->i_disksize = inode->i_size;
4638         ext4_mark_inode_dirty(handle, inode);
4639
4640         last_block = (inode->i_size + sb->s_blocksize - 1)
4641                         >> EXT4_BLOCK_SIZE_BITS(sb);
4642 retry:
4643         err = ext4_es_remove_extent(inode, last_block,
4644                                     EXT_MAX_BLOCKS - last_block);
4645         if (err == -ENOMEM) {
4646                 cond_resched();
4647                 congestion_wait(BLK_RW_ASYNC, HZ/50);
4648                 goto retry;
4649         }
4650         if (err) {
4651                 ext4_std_error(inode->i_sb, err);
4652                 return;
4653         }
4654         err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
4655         ext4_std_error(inode->i_sb, err);
4656 }
4657
4658 static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
4659                                   ext4_lblk_t len, int flags, int mode)
4660 {
4661         struct inode *inode = file_inode(file);
4662         handle_t *handle;
4663         int ret = 0;
4664         int ret2 = 0;
4665         int retries = 0;
4666         struct ext4_map_blocks map;
4667         unsigned int credits;
4668
4669         map.m_lblk = offset;
4670         /*
4671          * Don't normalize the request if it can fit in one extent so
4672          * that it doesn't get unnecessarily split into multiple
4673          * extents.
4674          */
4675         if (len <= EXT_UNINIT_MAX_LEN)
4676                 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4677
4678         /*
4679          * credits to insert 1 extent into extent tree
4680          */
4681         credits = ext4_chunk_trans_blocks(inode, len);
4682
4683 retry:
4684         while (ret >= 0 && ret < len) {
4685                 map.m_lblk = map.m_lblk + ret;
4686                 map.m_len = len = len - ret;
4687                 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4688                                             credits);
4689                 if (IS_ERR(handle)) {
4690                         ret = PTR_ERR(handle);
4691                         break;
4692                 }
4693                 ret = ext4_map_blocks(handle, inode, &map, flags);
4694                 if (ret <= 0) {
4695                         ext4_debug("inode #%lu: block %u: len %u: "
4696                                    "ext4_ext_map_blocks returned %d",
4697                                    inode->i_ino, map.m_lblk,
4698                                    map.m_len, ret);
4699                         ext4_mark_inode_dirty(handle, inode);
4700                         ret2 = ext4_journal_stop(handle);
4701                         break;
4702                 }
4703                 ret2 = ext4_journal_stop(handle);
4704                 if (ret2)
4705                         break;
4706         }
4707         if (ret == -ENOSPC &&
4708                         ext4_should_retry_alloc(inode->i_sb, &retries)) {
4709                 ret = 0;
4710                 goto retry;
4711         }
4712
4713         return ret > 0 ? ret2 : ret;
4714 }
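
/*
 * Note the loop semantics above: ext4_map_blocks() returns the number of
 * blocks it actually mapped, which may be less than requested (a single
 * uninitialized extent cannot exceed EXT_UNINIT_MAX_LEN blocks), so the
 * loop re-issues the request at map.m_lblk + ret until the whole range is
 * allocated, an error occurs, or the ENOSPC retry logic gives up.
 */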
4715
4716 static long ext4_zero_range(struct file *file, loff_t offset,
4717                             loff_t len, int mode)
4718 {
4719         struct inode *inode = file_inode(file);
4720         handle_t *handle = NULL;
4721         unsigned int max_blocks;
4722         loff_t new_size = 0;
4723         int ret = 0;
4724         int flags;
4725         int partial;
4726         loff_t start, end;
4727         ext4_lblk_t lblk;
4728         struct address_space *mapping = inode->i_mapping;
4729         unsigned int blkbits = inode->i_blkbits;
4730
4731         trace_ext4_zero_range(inode, offset, len, mode);
4732
4733         /*
4734          * Write out all dirty pages to avoid race conditions,
4735          * then release them.
4736          */
4737         if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4738                 ret = filemap_write_and_wait_range(mapping, offset,
4739                                                    offset + len - 1);
4740                 if (ret)
4741                         return ret;
4742         }
4743
4744         /*
4745          * Round up offset. This is not fallocate; we need to zero out
4746          * blocks, so convert the interior block-aligned part of the range
4747          * to unwritten extents and possibly manually zero out unaligned
4748          * parts of the range.
4749          */
4750         start = round_up(offset, 1 << blkbits);
4751         end = round_down((offset + len), 1 << blkbits);
4752
4753         if (start < offset || end > offset + len)
4754                 return -EINVAL;
4755         partial = (offset + len) & ((1 << blkbits) - 1);
4756
4757         lblk = start >> blkbits;
4758         max_blocks = (end >> blkbits);
4759         if (max_blocks < lblk)
4760                 max_blocks = 0;
4761         else
4762                 max_blocks -= lblk;
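             /*
              * Worked example (hypothetical numbers): with a 4k block size
              * (blkbits == 12), offset == 3000 and len == 7000 give
              * start == 4096, end == 8192, partial == 10000 & 4095 == 1808,
              * lblk == 1 and max_blocks == 2 - 1 == 1: only block 1 is fully
              * covered and can be converted to unwritten; the unaligned
              * edges are zeroed out manually below.
              */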
4763
4764         flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT |
4765                 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN;
4766         if (mode & FALLOC_FL_KEEP_SIZE)
4767                 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4768
4769         mutex_lock(&inode->i_mutex);
4770
4771         /*
4772          * Indirect files do not support unwritten extents
4773          */
4774         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4775                 ret = -EOPNOTSUPP;
4776                 goto out_mutex;
4777         }
4778
4779         if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4780              offset + len > i_size_read(inode)) {
4781                 new_size = offset + len;
4782                 ret = inode_newsize_ok(inode, new_size);
4783                 if (ret)
4784                         goto out_mutex;
4785                 /*
4786                  * If we have a partial block after EOF we have to allocate
4787                  * the entire block.
4788                  */
4789                 if (partial)
4790                         max_blocks += 1;
4791         }
4792
4793         if (max_blocks > 0) {
4794
4795                 /* Now release the pages and zero the block-aligned part of them */
4796                 truncate_pagecache_range(inode, start, end - 1);
4797
4798                 /* Wait for all existing DIO workers; newcomers will block on i_mutex */
4799                 ext4_inode_block_unlocked_dio(inode);
4800                 inode_dio_wait(inode);
4801
4802                 /*
4803                  * Remove entire range from the extent status tree.
4804                  */
4805                 ret = ext4_es_remove_extent(inode, lblk, max_blocks);
4806                 if (ret)
4807                         goto out_dio;
4808
4809                 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, flags,
4810                                              mode);
4811                 if (ret)
4812                         goto out_dio;
4813         }
4814
4815         handle = ext4_journal_start(inode, EXT4_HT_MISC, 4);
4816         if (IS_ERR(handle)) {
4817                 ret = PTR_ERR(handle);
4818                 ext4_std_error(inode->i_sb, ret);
4819                 goto out_dio;
4820         }
4821
4822         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4823
4824         if (new_size) {
4825                 if (new_size > i_size_read(inode))
4826                         i_size_write(inode, new_size);
4827                 if (new_size > EXT4_I(inode)->i_disksize)
4828                         ext4_update_i_disksize(inode, new_size);
4829         } else {
4830                 /*
4831                  * Mark that we allocate beyond EOF so the subsequent truncate
4832                  * can proceed even if the new size is the same as i_size.
4833                  */
4834                 if ((offset + len) > i_size_read(inode))
4835                         ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4836         }
4837
4838         ext4_mark_inode_dirty(handle, inode);
4839
4840         /* Zero out partial block at the edges of the range */
4841         ret = ext4_zero_partial_blocks(handle, inode, offset, len);
4842
4843         if (file->f_flags & O_SYNC)
4844                 ext4_handle_sync(handle);
4845
4846         ext4_journal_stop(handle);
4847 out_dio:
4848         ext4_inode_resume_unlocked_dio(inode);
4849 out_mutex:
4850         mutex_unlock(&inode->i_mutex);
4851         return ret;
4852 }
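
/*
 * From userspace this path is reached via fallocate(2) with
 * FALLOC_FL_ZERO_RANGE; a minimal sketch (error handling omitted):
 *
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	int fd = open("file", O_RDWR);
 *	// zero bytes [4096, 4096 + 8192) without changing i_size
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 8192);
 */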
4853
4854 /*
4855  * preallocate space for a file. This implements ext4's fallocate file
4856  * operation, which gets called from the sys_fallocate system call.
4857  * For block-mapped files, posix_fallocate should fall back to the method
4858  * of writing zeroes to the required new blocks (the same behavior which is
4859  * expected for file systems which do not support fallocate() system call).
4860  */
4861 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4862 {
4863         struct inode *inode = file_inode(file);
4864         handle_t *handle;
4865         loff_t new_size = 0;
4866         unsigned int max_blocks;
4867         int ret = 0;
4868         int flags;
4869         ext4_lblk_t lblk;
4870         struct timespec tv;
4871         unsigned int blkbits = inode->i_blkbits;
4872
4873         /* Return error if mode is not supported */
4874         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
4875                      FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
4876                 return -EOPNOTSUPP;
4877
4878         if (mode & FALLOC_FL_PUNCH_HOLE)
4879                 return ext4_punch_hole(inode, offset, len);
4880
4881         if (mode & FALLOC_FL_COLLAPSE_RANGE)
4882                 return ext4_collapse_range(inode, offset, len);
4883
4884         ret = ext4_convert_inline_data(inode);
4885         if (ret)
4886                 return ret;
4887
4888         /*
4889          * currently supporting (pre)allocate mode for extent-based
4890          * files _only_
4891          */
4892         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4893                 return -EOPNOTSUPP;
4894
4895         if (mode & FALLOC_FL_ZERO_RANGE)
4896                 return ext4_zero_range(file, offset, len, mode);
4897
4898         trace_ext4_fallocate_enter(inode, offset, len, mode);
4899         lblk = offset >> blkbits;
4900         /*
4901          * We can't just convert len to max_blocks: with blocksize = 4096,
4902          * offset = 3072 and len = 2048, the range still spans two blocks
4903          */
4904         max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
4905                 - lblk;
4906
4907         flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
4908         if (mode & FALLOC_FL_KEEP_SIZE)
4909                 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4910
4911         mutex_lock(&inode->i_mutex);
4912
4913         if (!(mode & FALLOC_FL_KEEP_SIZE) &&
4914              offset + len > i_size_read(inode)) {
4915                 new_size = offset + len;
4916                 ret = inode_newsize_ok(inode, new_size);
4917                 if (ret)
4918                         goto out;
4919         }
4920
4921         ret = ext4_alloc_file_blocks(file, lblk, max_blocks, flags, mode);
4922         if (ret)
4923                 goto out;
4924
4925         handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
4926         if (IS_ERR(handle)) {
4927                 ret = PTR_ERR(handle);
                     goto out;
             }
4928
4929         tv = inode->i_ctime = ext4_current_time(inode);
4930
4931         if (new_size) {
4932                 if (new_size > i_size_read(inode)) {
4933                         i_size_write(inode, new_size);
4934                         inode->i_mtime = tv;
4935                 }
4936                 if (new_size > EXT4_I(inode)->i_disksize)
4937                         ext4_update_i_disksize(inode, new_size);
4938         } else {
4939                 /*
4940                  * Mark that we allocate beyond EOF so the subsequent truncate
4941                  * can proceed even if the new size is the same as i_size.
4942                  */
4943                 if ((offset + len) > i_size_read(inode))
4944                         ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4945         }
4946         ext4_mark_inode_dirty(handle, inode);
4947         if (file->f_flags & O_SYNC)
4948                 ext4_handle_sync(handle);
4949
4950         ext4_journal_stop(handle);
4951 out:
4952         mutex_unlock(&inode->i_mutex);
4953         trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4954         return ret;
4955 }
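
/*
 * Example (sketch, assuming a prior fstat(fd, &st)): preallocating 1 MiB
 * past EOF while keeping the file size unchanged leaves uninitialized
 * extents behind, so reads keep returning zeroes until the blocks are
 * actually written:
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, st.st_size, 1 << 20);
 */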
4956
4957 /*
4958  * This function converts a range of blocks to written extents.
4959  * The caller will pass the start offset and the size;
4960  * all unwritten extents within this range will be converted to
4961  * written extents.
4962  *
4963  * This function is called from the direct IO end_io callback
4964  * function to convert the fallocated extents after the IO is completed.
4965  * Returns 0 on success.
4966  */
4967 int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
4968                                    loff_t offset, ssize_t len)
4969 {
4970         unsigned int max_blocks;
4971         int ret = 0;
4972         int ret2 = 0;
4973         struct ext4_map_blocks map;
4974         unsigned int credits, blkbits = inode->i_blkbits;
4975
4976         map.m_lblk = offset >> blkbits;
4977         /*
4978          * We can't just convert len to max_blocks: with blocksize = 4096,
4979          * offset = 3072 and len = 2048, the range still spans two blocks
4980          */
4981         max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
4982                       map.m_lblk);
4983         /*
4984          * This is somewhat ugly but the idea is clear: when a transaction is
4985          * reserved, everything goes into it. Otherwise we'd rather start several
4986          * smaller transactions for the conversion of each extent separately.
4987          */
4988         if (handle) {
4989                 handle = ext4_journal_start_reserved(handle,
4990                                                      EXT4_HT_EXT_CONVERT);
4991                 if (IS_ERR(handle))
4992                         return PTR_ERR(handle);
4993                 credits = 0;
4994         } else {
4995                 /*
4996                  * credits to insert 1 extent into extent tree
4997                  */
4998                 credits = ext4_chunk_trans_blocks(inode, max_blocks);
4999         }
5000         while (ret >= 0 && ret < max_blocks) {
5001                 map.m_lblk += ret;
5002                 map.m_len = (max_blocks -= ret);
5003                 if (credits) {
5004                         handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
5005                                                     credits);
5006                         if (IS_ERR(handle)) {
5007                                 ret = PTR_ERR(handle);
5008                                 break;
5009                         }
5010                 }
5011                 ret = ext4_map_blocks(handle, inode, &map,
5012                                       EXT4_GET_BLOCKS_IO_CONVERT_EXT);
5013                 if (ret <= 0)
5014                         ext4_warning(inode->i_sb,
5015                                      "inode #%lu: block %u: len %u: "
5016                                      "ext4_ext_map_blocks returned %d",
5017                                      inode->i_ino, map.m_lblk,
5018                                      map.m_len, ret);
5019                 ext4_mark_inode_dirty(handle, inode);
5020                 if (credits)
5021                         ret2 = ext4_journal_stop(handle);
5022                 if (ret <= 0 || ret2)
5023                         break;
5024         }
5025         if (!credits)
5026                 ret2 = ext4_journal_stop(handle);
5027         return ret > 0 ? ret2 : ret;
5028 }
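
/*
 * The transaction strategy above, in brief: when the caller already holds
 * a reserved handle (the DIO completion path), the whole conversion runs
 * inside that handle and 'credits' stays 0; otherwise each loop iteration
 * starts and stops its own small transaction, sized by
 * ext4_chunk_trans_blocks(), so a large range is converted one
 * extent-sized chunk at a time.
 */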
5029
5030 /*
5031  * If newes is not an existing extent (newes->es_pblk equals zero), find
5032  * the delayed extent at the start of newes, update newes accordingly, and
5033  * return the start of the next delayed extent.
5034  *
5035  * If newes is an existing extent (newes->es_pblk is not zero), return the
5036  * start of the next delayed extent, or EXT_MAX_BLOCKS if no delayed
5037  * extent is found. Leave newes unmodified.
5038  */
5039 static int ext4_find_delayed_extent(struct inode *inode,
5040                                     struct extent_status *newes)
5041 {
5042         struct extent_status es;
5043         ext4_lblk_t block, next_del;
5044
5045         if (newes->es_pblk == 0) {
5046                 ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
5047                                 newes->es_lblk + newes->es_len - 1, &es);
5048
5049                 /*
5050                  * No extent in the extent tree contains block @newes->es_lblk,
5051                  * so the block may lie in 1) a hole or 2) a delayed extent.
5052                  */
5053                 if (es.es_len == 0)
5054                         /* A hole found. */
5055                         return 0;
5056
5057                 if (es.es_lblk > newes->es_lblk) {
5058                         /* A hole found. */
5059                         newes->es_len = min(es.es_lblk - newes->es_lblk,
5060                                             newes->es_len);
5061                         return 0;
5062                 }
5063
5064                 newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
5065         }
5066
5067         block = newes->es_lblk + newes->es_len;
5068         ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
5069         if (es.es_len == 0)
5070                 next_del = EXT_MAX_BLOCKS;
5071         else
5072                 next_del = es.es_lblk;
5073
5074         return next_del;
5075 }
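
/*
 * For example (hypothetical numbers): if newes is {es_lblk = 10, es_len = 5,
 * es_pblk = 0} and the status tree holds a delayed extent at [12-20], the
 * lookup finds es_lblk == 12 > 10, so newes->es_len is trimmed to 2 (the
 * hole [10-11]) and 0 is returned; a subsequent call starting past the hole
 * would instead report the start of the next delayed extent.
 */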
5076 /* fiemap flags we can handle are specified here */
5077 #define EXT4_FIEMAP_FLAGS       (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
5078
5079 static int ext4_xattr_fiemap(struct inode *inode,
5080                                 struct fiemap_extent_info *fieinfo)
5081 {
5082         __u64 physical = 0;
5083         __u64 length;
5084         __u32 flags = FIEMAP_EXTENT_LAST;
5085         int blockbits = inode->i_sb->s_blocksize_bits;
5086         int error = 0;
5087
5088         /* in-inode? */
5089         if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
5090                 struct ext4_iloc iloc;
5091                 int offset;     /* offset of xattr in inode */
5092
5093                 error = ext4_get_inode_loc(inode, &iloc);
5094                 if (error)
5095                         return error;
5096                 physical = (__u64)iloc.bh->b_blocknr << blockbits;
5097                 offset = EXT4_GOOD_OLD_INODE_SIZE +
5098                                 EXT4_I(inode)->i_extra_isize;
5099                 physical += offset;
5100                 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
5101                 flags |= FIEMAP_EXTENT_DATA_INLINE;
5102                 brelse(iloc.bh);
5103         } else { /* external block */
5104                 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
5105                 length = inode->i_sb->s_blocksize;
5106         }
5107
5108         if (physical)
5109                 error = fiemap_fill_next_extent(fieinfo, 0, physical,
5110                                                 length, flags);
5111         return (error < 0 ? error : 0);
5112 }
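
/*
 * Worked example for the in-inode case (hypothetical numbers): with a
 * 256-byte on-disk inode, EXT4_GOOD_OLD_INODE_SIZE == 128 and
 * i_extra_isize == 32, the xattr area starts at byte offset 160 inside the
 * inode; if the inode lives in block 1000 of a 4k filesystem, the reported
 * extent is physical == 1000 * 4096 + 160 and length == 256 - 160 == 96.
 */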
5113
5114 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
5115                 __u64 start, __u64 len)
5116 {
5117         ext4_lblk_t start_blk;
5118         int error = 0;
5119
5120         if (ext4_has_inline_data(inode)) {
5121                 int has_inline = 1;
5122
5123                 error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline);
5124
5125                 if (has_inline)
5126                         return error;
5127         }
5128
5129         if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
5130                 error = ext4_ext_precache(inode);
5131                 if (error)
5132                         return error;
5133         }
5134
5135         /* fall back to generic here if not in extents format */
5136         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5137                 return generic_block_fiemap(inode, fieinfo, start, len,
5138                         ext4_get_block);
5139
5140         if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
5141                 return -EBADR;
5142
5143         if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
5144                 error = ext4_xattr_fiemap(inode, fieinfo);
5145         } else {
5146                 ext4_lblk_t len_blks;
5147                 __u64 last_blk;
5148
5149                 start_blk = start >> inode->i_sb->s_blocksize_bits;
5150                 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
5151                 if (last_blk >= EXT_MAX_BLOCKS)
5152                         last_blk = EXT_MAX_BLOCKS-1;
5153                 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
5154
5155                 /*
5156                  * Walk the extent tree gathering extent information
5157                  * and pushing extents back to the user.
5158                  */
5159                 error = ext4_fill_fiemap_extents(inode, start_blk,
5160                                                  len_blks, fieinfo);
5161         }
5162         ext4_es_lru_add(inode);
5163         return error;
5164 }
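
/*
 * From userspace this is reached through the FS_IOC_FIEMAP ioctl; a
 * minimal sketch (single-extent buffer, error handling omitted):
 *
 *	struct fiemap *fm = calloc(1, sizeof(*fm) +
 *				   sizeof(struct fiemap_extent));
 *	fm->fm_start = 0;
 *	fm->fm_length = FIEMAP_MAX_OFFSET;	// whole file
 *	fm->fm_flags = FIEMAP_FLAG_SYNC;
 *	fm->fm_extent_count = 1;
 *	ioctl(fd, FS_IOC_FIEMAP, fm);
 */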
5165
5166 /*
5167  * ext4_access_path:
5168  * Function to access the path buffer for marking it dirty.
5169  * It also checks if there are sufficient credits left in the journal handle
5170  * to update path.
5171  * to update the path.
5172 static int
5173 ext4_access_path(handle_t *handle, struct inode *inode,
5174                 struct ext4_ext_path *path)
5175 {
5176         int credits, err;
5177
5178         if (!ext4_handle_valid(handle))
5179                 return 0;
5180
5181         /*
5182          * Check if we need to extend the journal credits.
5183          * We need 3 for the leaf, sb, and inode, plus 2 (bitmap and
5184          * group descriptor) for each block group; assuming two block
5185          * groups gives 3 + 2 * 2 = 7 credits.
5186          */
5187         if (handle->h_buffer_credits < 7) {
5188                 credits = ext4_writepage_trans_blocks(inode);
5189                 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
5190                 /* EAGAIN is success */
5191                 if (err && err != -EAGAIN)
5192                         return err;
5193         }
5194
5195         err = ext4_ext_get_access(handle, inode, path);
5196         return err;
5197 }
5198
5199 /*
5200  * ext4_ext_shift_path_extents:
5201  * Shift the extents of a path structure lying between path[depth].p_ext
5202  * and EXT_LAST_EXTENT(path[depth].p_hdr) downwards, by subtracting shift
5203  * from the starting block of each extent.
5204  */
5205 static int
5206 ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
5207                             struct inode *inode, handle_t *handle,
5208                             ext4_lblk_t *start)
5209 {
5210         int depth, err = 0;
5211         struct ext4_extent *ex_start, *ex_last;
5212         bool update = false;
5213         depth = path->p_depth;
5214
5215         while (depth >= 0) {
5216                 if (depth == path->p_depth) {
5217                         ex_start = path[depth].p_ext;
5218                         if (!ex_start)
5219                                 return -EIO;
5220
5221                         ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
5222                         if (!ex_last)
5223                                 return -EIO;
5224
5225                         err = ext4_access_path(handle, inode, path + depth);
5226                         if (err)
5227                                 goto out;
5228
5229                         if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
5230                                 update = 1;
5231
5232                         *start = le32_to_cpu(ex_last->ee_block) +
5233                                 ext4_ext_get_actual_len(ex_last);
5234
5235                         while (ex_start <= ex_last) {
5236                                 le32_add_cpu(&ex_start->ee_block, -shift);
5237                                 if (ex_start >
5238                                         EXT_FIRST_EXTENT(path[depth].p_hdr)) {
5239                                         if (ext4_ext_try_to_merge_right(inode,
5240                                                 path, ex_start - 1))
5241                                                 ex_last--;
5242                                 }
5243                                 ex_start++;
5244                         }
5245                         err = ext4_ext_dirty(handle, inode, path + depth);
5246                         if (err)
5247                                 goto out;
5248
5249                         if (--depth < 0 || !update)
5250                                 break;
5251                 }
5252
5253                 /* Update index too */
5254                 err = ext4_access_path(handle, inode, path + depth);
5255                 if (err)
5256                         goto out;
5257
5258                 le32_add_cpu(&path[depth].p_idx->ei_block, -shift);
5259                 err = ext4_ext_dirty(handle, inode, path + depth);
5260                 if (err)
5261                         goto out;
5262
5263                 /* we are done if current index is not a starting index */
5264                 if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
5265                         break;
5266
5267                 depth--;
5268         }
5269
5270 out:
5271         return err;
5272 }
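
     /*
      * Worked example (block numbers are illustrative only): with
      * shift = 2, a leaf holding extents [10..14] and [20..23] becomes
      * [8..12] and [18..21].  Because the first extent of the leaf
      * moved, 'update' is set and the index entries above the leaf are
      * decremented by the same shift on the way up.
      */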
5273
5274 /*
5275  * ext4_ext_shift_extents:
5276  * All the extents which lie in the range from start to the last allocated
5277  * block for the file are shifted downwards by shift blocks.
5278  * Returns 0 on success, a negative error code otherwise.
5279  */
5280 static int
5281 ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5282                        ext4_lblk_t start, ext4_lblk_t shift)
5283 {
5284         struct ext4_ext_path *path;
5285         int ret = 0, depth;
5286         struct ext4_extent *extent;
5287         ext4_lblk_t stop_block, current_block;
5288         ext4_lblk_t ex_start, ex_end;
5289
5290         /* Let path point to the last extent */
5291         path = ext4_ext_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0);
5292         if (IS_ERR(path))
5293                 return PTR_ERR(path);
5294
5295         depth = path->p_depth;
5296         extent = path[depth].p_ext;
5297         if (!extent) {
5298                 ext4_ext_drop_refs(path);
5299                 kfree(path);
5300                 return ret;
5301         }
5302
5303         stop_block = le32_to_cpu(extent->ee_block) +
                        ext4_ext_get_actual_len(extent);
5304         ext4_ext_drop_refs(path);
5305         kfree(path);
5306
5307         /* Nothing to shift if the hole is at the end of the file */
5308         if (start >= stop_block)
5309                 return ret;
5310
5311         /*
5312          * Don't start shifting extents until we make sure the hole is big
5313          * enough to accommodate the shift.
5314          */
5315         path = ext4_ext_find_extent(inode, start - 1, NULL, 0);
             if (IS_ERR(path))
                     return PTR_ERR(path);
5316         depth = path->p_depth;
5317         extent = path[depth].p_ext;
             if (extent) {
                     ex_start = le32_to_cpu(extent->ee_block);
                     ex_end = ex_start + ext4_ext_get_actual_len(extent);
             } else {
                     ex_start = 0;
                     ex_end = 0;
             }
5320         ext4_ext_drop_refs(path);
5321         kfree(path);
5322
5323         if ((start == ex_start && shift > ex_start) ||
5324             (shift > start - ex_end))
5325                 return -EINVAL;
5326
5327         /* It's safe to start updating extents */
5328         while (start < stop_block) {
5329                 path = ext4_ext_find_extent(inode, start, NULL, 0);
5330                 if (IS_ERR(path))
5331                         return PTR_ERR(path);
5332                 depth = path->p_depth;
5333                 extent = path[depth].p_ext;
                     if (!extent) {
                             EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
                                              (unsigned long) start);
                             ext4_ext_drop_refs(path);
                             kfree(path);
                             return -EIO;
                     }
5334                 current_block = le32_to_cpu(extent->ee_block);
5335                 if (start > current_block) {
5336                         /* Hole, move to the next extent */
5337                         ret = mext_next_extent(inode, path, &extent);
5338                         if (ret != 0) {
5339                                 ext4_ext_drop_refs(path);
5340                                 kfree(path);
5341                                 if (ret == 1)
5342                                         ret = 0;
5343                                 break;
5344                         }
5345                 }
5346                 ret = ext4_ext_shift_path_extents(path, shift, inode,
5347                                 handle, &start);
5348                 ext4_ext_drop_refs(path);
5349                 kfree(path);
5350                 if (ret)
5351                         break;
5352         }
5353
5354         return ret;
5355 }
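
     /*
      * Illustrative numbers for the remove + shift sequence driven by
      * ext4_collapse_range() below (4k blocks, offset = 4096,
      * len = 8192):
      *
      *	punch_start = 1, punch_stop = 3
      *	ext4_ext_remove_space(inode, 1, 2);	(free blocks 1-2)
      *	ext4_ext_shift_extents(inode, handle, 3, 2);
      *						(blocks 3.. become 1..)
      */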
5356
5357 /*
5358  * ext4_collapse_range:
5359  * This implements fallocate's collapse range functionality for ext4.
5360  * Returns 0 on success and a negative error code on failure.
5361  */
5362 int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
5363 {
5364         struct super_block *sb = inode->i_sb;
5365         ext4_lblk_t punch_start, punch_stop;
5366         handle_t *handle;
5367         unsigned int credits;
5368         loff_t new_size;
5369         int ret;
5370
5371         /* Collapsing up to or past EOF is effectively a truncate; reject it */
             if (offset + len >= i_size_read(inode))
                     return -EINVAL;
5372
5373         /* Collapse range works only on fs block size aligned offsets. */
5374         if (offset & (EXT4_BLOCK_SIZE(sb) - 1) ||
5375             len & (EXT4_BLOCK_SIZE(sb) - 1))
5376                 return -EINVAL;
5377
5378         if (!S_ISREG(inode->i_mode))
5379                 return -EOPNOTSUPP;
5380
5381         trace_ext4_collapse_range(inode, offset, len);
5382
5383         punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
5384         punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
5385
5386         /* Write out all dirty pages */
5387         ret = filemap_write_and_wait_range(inode->i_mapping, offset, -1);
5388         if (ret)
5389                 return ret;
5390
5391         /* Take mutex lock */
5392         mutex_lock(&inode->i_mutex);
5393
5394         /* Collapse range is not allowed on append-only or immutable files */
5395         if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
5396                 ret = -EPERM;
5397                 goto out_mutex;
5398         }
5399
5400         if (IS_SWAPFILE(inode)) {
5401                 ret = -ETXTBSY;
5402                 goto out_mutex;
5403         }
5404
5405         /* Currently just for extent-based files */
5406         if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
5407                 ret = -EOPNOTSUPP;
5408                 goto out_mutex;
5409         }
5410
5411         truncate_pagecache_range(inode, offset, -1);
5412
5413         /* Wait for existing dio to complete */
5414         ext4_inode_block_unlocked_dio(inode);
5415         inode_dio_wait(inode);
5416
5417         credits = ext4_writepage_trans_blocks(inode);
5418         handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
5419         if (IS_ERR(handle)) {
5420                 ret = PTR_ERR(handle);
5421                 goto out_dio;
5422         }
5423
5424         down_write(&EXT4_I(inode)->i_data_sem);
5425         ext4_discard_preallocations(inode);
5426
5427         ret = ext4_es_remove_extent(inode, punch_start,
5428                                     EXT_MAX_BLOCKS - punch_start - 1);
5429         if (ret) {
5430                 up_write(&EXT4_I(inode)->i_data_sem);
5431                 goto out_stop;
5432         }
5433
5434         ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
5435         if (ret) {
5436                 up_write(&EXT4_I(inode)->i_data_sem);
5437                 goto out_stop;
5438         }
5439
5440         ret = ext4_ext_shift_extents(inode, handle, punch_stop,
5441                                      punch_stop - punch_start);
5442         if (ret) {
5443                 up_write(&EXT4_I(inode)->i_data_sem);
5444                 goto out_stop;
5445         }
5446
5447         new_size = i_size_read(inode) - len;
5448         truncate_setsize(inode, new_size);
5449         EXT4_I(inode)->i_disksize = new_size;
5450
5451         ext4_discard_preallocations(inode);
5452         up_write(&EXT4_I(inode)->i_data_sem);
5453         if (IS_SYNC(inode))
5454                 ext4_handle_sync(handle);
5455         inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
5456         ext4_mark_inode_dirty(handle, inode);
5457
5458 out_stop:
5459         ext4_journal_stop(handle);
5460 out_dio:
5461         ext4_inode_resume_unlocked_dio(inode);
5462 out_mutex:
5463         mutex_unlock(&inode->i_mutex);
5464         return ret;
5465 }
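
     /*
      * Userspace reaches ext4_collapse_range() via fallocate(2) with
      * FALLOC_FL_COLLAPSE_RANGE; a minimal sketch (offset and length
      * must be block-size aligned, per the checks above):
      *
      *	#define _GNU_SOURCE
      *	#include <fcntl.h>
      *	#include <linux/falloc.h>
      *
      *	int fd = open("file", O_RDWR);
      *	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 4096, 8192))
      *		perror("fallocate");
      */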