/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;

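/*
 * Pack BITS_PER_LONG/8 bytes of @str into an unsigned long with str[0]
 * in the most significant byte, so the __reverse_* bit helpers below
 * can scan f2fs bitmaps in on-disk byte order.
 */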
static unsigned long __reverse_ulong(unsigned char *str)
{
        unsigned long tmp = 0;
        int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
        shift = 56;
#endif
        while (shift >= 0) {
                tmp |= (unsigned long)str[idx++] << shift;
                shift -= BITS_PER_BYTE;
        }
        return tmp;
}

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
        int num = 0;

#if BITS_PER_LONG == 64
        if ((word & 0xffffffff00000000UL) == 0)
                num += 32;
        else
                word >>= 32;
#endif
        if ((word & 0xffff0000) == 0)
                num += 16;
        else
                word >>= 16;

        if ((word & 0xff00) == 0)
                num += 8;
        else
                word >>= 8;

        if ((word & 0xf0) == 0)
                num += 4;
        else
                word >>= 4;

        if ((word & 0xc) == 0)
                num += 2;
        else
                word >>= 2;

        if ((word & 0x2) == 0)
                num += 1;
        return num;
}

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be an integral multiple of BITS_PER_LONG.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
                        unsigned long size, unsigned long offset)
{
        const unsigned long *p = addr + BIT_WORD(offset);
        unsigned long result = size;
        unsigned long tmp;

        if (offset >= size)
                return size;

        size -= (offset & ~(BITS_PER_LONG - 1));
        offset %= BITS_PER_LONG;

        while (1) {
                if (*p == 0)
                        goto pass;

                tmp = __reverse_ulong((unsigned char *)p);

                tmp &= ~0UL >> offset;
                if (size < BITS_PER_LONG)
                        tmp &= (~0UL << (BITS_PER_LONG - size));
                if (tmp)
                        goto found;
pass:
                if (size <= BITS_PER_LONG)
                        break;
                size -= BITS_PER_LONG;
                offset = 0;
                p++;
        }
        return result;
found:
        return result - size + __reverse_ffs(tmp);
}

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
                        unsigned long size, unsigned long offset)
{
        const unsigned long *p = addr + BIT_WORD(offset);
        unsigned long result = size;
        unsigned long tmp;

        if (offset >= size)
                return size;

        size -= (offset & ~(BITS_PER_LONG - 1));
        offset %= BITS_PER_LONG;

        while (1) {
                if (*p == ~0UL)
                        goto pass;

                tmp = __reverse_ulong((unsigned char *)p);

                if (offset)
                        tmp |= ~0UL << (BITS_PER_LONG - offset);
                if (size < BITS_PER_LONG)
                        tmp |= ~0UL >> size;
                if (tmp != ~0UL)
                        goto found;
pass:
                if (size <= BITS_PER_LONG)
                        break;
                size -= BITS_PER_LONG;
                offset = 0;
                p++;
        }
        return result;
found:
        return result - size + __reverse_ffz(tmp);
}

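/*
 * Register a dirty page of an atomic-write file: tag it with
 * ATOMIC_WRITTEN_PAGE, take an extra page reference and queue it on the
 * per-inode inmem_pages list until commit or drop.
 */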
void register_inmem_page(struct inode *inode, struct page *page)
{
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct inmem_pages *new;

        f2fs_trace_pid(page);

        set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
        SetPagePrivate(page);

        new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

        /* add atomic page indices to the list */
        new->page = page;
        INIT_LIST_HEAD(&new->list);

        /* increase reference count with clean state */
        mutex_lock(&fi->inmem_lock);
        get_page(page);
        list_add_tail(&new->list, &fi->inmem_pages);
        inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
        mutex_unlock(&fi->inmem_lock);

        trace_f2fs_register_inmem_page(page, INMEM);
}

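/*
 * Walk @head and release every registered page: with @drop the pages are
 * simply discarded, with @recover each page's old block address is put
 * back via f2fs_replace_block() to undo a partially committed batch.
 */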
static int __revoke_inmem_pages(struct inode *inode,
                                struct list_head *head, bool drop, bool recover)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct inmem_pages *cur, *tmp;
        int err = 0;

        list_for_each_entry_safe(cur, tmp, head, list) {
                struct page *page = cur->page;

                if (drop)
                        trace_f2fs_commit_inmem_page(page, INMEM_DROP);

                lock_page(page);

                if (recover) {
                        struct dnode_of_data dn;
                        struct node_info ni;

                        trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);

                        set_new_dnode(&dn, inode, NULL, NULL, 0);
                        if (get_dnode_of_data(&dn, page->index, LOOKUP_NODE)) {
                                err = -EAGAIN;
                                goto next;
                        }
                        get_node_info(sbi, dn.nid, &ni);
                        f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
                                        cur->old_addr, ni.version, true, true);
                        f2fs_put_dnode(&dn);
                }
next:
                /* we don't need to invalidate this in the successful status */
                if (drop || recover)
                        ClearPageUptodate(page);
                set_page_private(page, 0);
                ClearPagePrivate(page);
                f2fs_put_page(page, 1);

                list_del(&cur->list);
                kmem_cache_free(inmem_entry_slab, cur);
                dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
        }
        return err;
}

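/*
 * Drop all in-memory pages of an atomic-write file and clear its
 * FI_ATOMIC_FILE state.
 */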
void drop_inmem_pages(struct inode *inode)
{
        struct f2fs_inode_info *fi = F2FS_I(inode);

        mutex_lock(&fi->inmem_lock);
        __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
        mutex_unlock(&fi->inmem_lock);

        clear_inode_flag(inode, FI_ATOMIC_FILE);
        stat_dec_atomic_write(inode);
}

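/*
 * Write back every registered page with do_write_data_page(), remembering
 * the old block address of each one on @revoke_list so a later failure
 * can be rolled back by __revoke_inmem_pages().
 */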
static int __commit_inmem_pages(struct inode *inode,
                                        struct list_head *revoke_list)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct inmem_pages *cur, *tmp;
        struct f2fs_io_info fio = {
                .sbi = sbi,
                .type = DATA,
                .op = REQ_OP_WRITE,
                .op_flags = REQ_SYNC | REQ_PRIO,
                .encrypted_page = NULL,
        };
        pgoff_t last_idx = ULONG_MAX;
        int err = 0;

        list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
                struct page *page = cur->page;

                lock_page(page);
                if (page->mapping == inode->i_mapping) {
                        trace_f2fs_commit_inmem_page(page, INMEM);

                        set_page_dirty(page);
                        f2fs_wait_on_page_writeback(page, DATA, true);
                        if (clear_page_dirty_for_io(page)) {
                                inode_dec_dirty_pages(inode);
                                remove_dirty_inode(inode);
                        }

                        fio.page = page;
                        err = do_write_data_page(&fio);
                        if (err) {
                                unlock_page(page);
                                break;
                        }

                        /* record old blkaddr for revoking */
                        cur->old_addr = fio.old_blkaddr;
                        last_idx = page->index;
                }
                unlock_page(page);
                list_move_tail(&cur->list, revoke_list);
        }

        if (last_idx != ULONG_MAX)
                f2fs_submit_merged_bio_cond(sbi, inode, 0, last_idx,
                                                        DATA, WRITE);

        if (!err)
                __revoke_inmem_pages(inode, revoke_list, false, false);

        return err;
}

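/*
 * Commit all pages of an atomic write as one unit under f2fs_lock_op();
 * on failure the already written pages are revoked to their old block
 * addresses and all remaining uncommitted pages are dropped.
 */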
int commit_inmem_pages(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode_info *fi = F2FS_I(inode);
        struct list_head revoke_list;
        int err;

        INIT_LIST_HEAD(&revoke_list);
        f2fs_balance_fs(sbi, true);
        f2fs_lock_op(sbi);

        set_inode_flag(inode, FI_ATOMIC_COMMIT);

        mutex_lock(&fi->inmem_lock);
        err = __commit_inmem_pages(inode, &revoke_list);
        if (err) {
                int ret;
                /*
                 * Try to revoke all committed pages. This could still fail
                 * due to lack of memory or some other reason; in that case
                 * -EAGAIN is returned, which means the transaction has lost
                 * its integrity and the caller should use a journal to
                 * recover, or rewrite and commit the last transaction. For
                 * any other error number, the filesystem has done the
                 * revoking itself.
                 */
                ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
                if (ret)
                        err = ret;

                /* drop all uncommitted pages */
                __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
        }
        mutex_unlock(&fi->inmem_lock);

        clear_inode_flag(inode, FI_ATOMIC_COMMIT);

        f2fs_unlock_op(sbi);
        return err;
}

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
        if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
                f2fs_show_injection_info(FAULT_CHECKPOINT);
                f2fs_stop_checkpoint(sbi, false);
        }
#endif

        if (!need)
                return;

        /* background balancing may still be pending */
        if (excess_cached_nats(sbi))
                f2fs_balance_fs_bg(sbi);

        /*
         * We should do GC, or end up with a checkpoint, if there are too
         * many dirty dir/node pages without enough free segments.
         */
        if (has_not_enough_free_secs(sbi, 0, 0)) {
                mutex_lock(&sbi->gc_mutex);
                f2fs_gc(sbi, false, false);
        }
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
        /* try to shrink extent cache when there is not enough memory */
        if (!available_free_memory(sbi, EXTENT_CACHE))
                f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

        /* check the # of cached NAT entries */
        if (!available_free_memory(sbi, NAT_ENTRIES))
                try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

        if (!available_free_memory(sbi, FREE_NIDS))
                try_to_free_nids(sbi, MAX_FREE_NIDS);
        else
                build_free_nids(sbi, false, false);

        if (!is_idle(sbi))
                return;

        /* checkpoint is the only way to shrink partial cached entries */
        if (!available_free_memory(sbi, NAT_ENTRIES) ||
                        !available_free_memory(sbi, INO_ENTRIES) ||
                        excess_prefree_segs(sbi) ||
                        excess_dirty_nats(sbi) ||
                        f2fs_time_over(sbi, CP_TIME)) {
                if (test_opt(sbi, DATA_FLUSH)) {
                        struct blk_plug plug;

                        blk_start_plug(&plug);
                        sync_dirty_inodes(sbi, FILE_INODE);
                        blk_finish_plug(&plug);
                }
                f2fs_sync_fs(sbi->sb, true);
                stat_inc_bg_cp_count(sbi->stat_info);
        }
}

static int __submit_flush_wait(struct block_device *bdev)
{
        struct bio *bio = f2fs_bio_alloc(0);
        int ret;

        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
        bio->bi_bdev = bdev;
        ret = submit_bio_wait(bio);
        bio_put(bio);
        return ret;
}

static int submit_flush_wait(struct f2fs_sb_info *sbi)
{
        int ret = __submit_flush_wait(sbi->sb->s_bdev);
        int i;

        if (sbi->s_ndevs && !ret) {
                for (i = 1; i < sbi->s_ndevs; i++) {
                        trace_f2fs_issue_flush(FDEV(i).bdev,
                                        test_opt(sbi, NOBARRIER),
                                        test_opt(sbi, FLUSH_MERGE));
                        ret = __submit_flush_wait(FDEV(i).bdev);
                        if (ret)
                                break;
                }
        }
        return ret;
}

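/*
 * Flush-merge worker: drain the lockless issue_list, submit a single
 * preflush on behalf of all queued waiters and complete each of them
 * with the same result.
 */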
static int issue_flush_thread(void *data)
{
        struct f2fs_sb_info *sbi = data;
        struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
        wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
        if (kthread_should_stop())
                return 0;

        if (!llist_empty(&fcc->issue_list)) {
                struct flush_cmd *cmd, *next;
                int ret;

                fcc->dispatch_list = llist_del_all(&fcc->issue_list);
                fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

                ret = submit_flush_wait(sbi);
                llist_for_each_entry_safe(cmd, next,
                                          fcc->dispatch_list, llnode) {
                        cmd->ret = ret;
                        complete(&cmd->wait);
                }
                fcc->dispatch_list = NULL;
        }

        wait_event_interruptible(*q,
                kthread_should_stop() || !llist_empty(&fcc->issue_list));
        goto repeat;
}

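/*
 * Issue a cache flush for all devices: a no-op with "nobarrier", a direct
 * synchronous flush without "flush_merge", and otherwise a command queued
 * to the flush-merge thread so concurrent callers share one preflush.
 */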
int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
        struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
        struct flush_cmd cmd;

        if (test_opt(sbi, NOBARRIER))
                return 0;

        if (!test_opt(sbi, FLUSH_MERGE))
                return submit_flush_wait(sbi);

        if (!atomic_read(&fcc->submit_flush)) {
                int ret;

                atomic_inc(&fcc->submit_flush);
                ret = submit_flush_wait(sbi);
                atomic_dec(&fcc->submit_flush);
                return ret;
        }

        init_completion(&cmd.wait);

        atomic_inc(&fcc->submit_flush);
        llist_add(&cmd.llnode, &fcc->issue_list);

        if (!fcc->dispatch_list)
                wake_up(&fcc->flush_wait_queue);

        if (fcc->f2fs_issue_flush) {
                wait_for_completion(&cmd.wait);
                atomic_dec(&fcc->submit_flush);
        } else {
                llist_del_all(&fcc->issue_list);
                atomic_set(&fcc->submit_flush, 0);
        }

        return cmd.ret;
}

int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
        dev_t dev = sbi->sb->s_bdev->bd_dev;
        struct flush_cmd_control *fcc;
        int err = 0;

        if (SM_I(sbi)->fcc_info) {
                fcc = SM_I(sbi)->fcc_info;
                goto init_thread;
        }

        fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
        if (!fcc)
                return -ENOMEM;
        atomic_set(&fcc->submit_flush, 0);
        init_waitqueue_head(&fcc->flush_wait_queue);
        init_llist_head(&fcc->issue_list);
        SM_I(sbi)->fcc_info = fcc;
init_thread:
        fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
                                "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(fcc->f2fs_issue_flush)) {
                err = PTR_ERR(fcc->f2fs_issue_flush);
                kfree(fcc);
                SM_I(sbi)->fcc_info = NULL;
                return err;
        }

        return err;
}

void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
        struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

        if (fcc && fcc->f2fs_issue_flush) {
                struct task_struct *flush_thread = fcc->f2fs_issue_flush;

                fcc->f2fs_issue_flush = NULL;
                kthread_stop(flush_thread);
        }
        if (free) {
                kfree(fcc);
                SM_I(sbi)->fcc_info = NULL;
        }
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
                enum dirty_type dirty_type)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        /* need not be added */
        if (IS_CURSEG(sbi, segno))
                return;

        if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
                dirty_i->nr_dirty[dirty_type]++;

        if (dirty_type == DIRTY) {
                struct seg_entry *sentry = get_seg_entry(sbi, segno);
                enum dirty_type t = sentry->type;

                if (unlikely(t >= DIRTY)) {
                        f2fs_bug_on(sbi, 1);
                        return;
                }
                if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
                        dirty_i->nr_dirty[t]++;
        }
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
                enum dirty_type dirty_type)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
                dirty_i->nr_dirty[dirty_type]--;

        if (dirty_type == DIRTY) {
                struct seg_entry *sentry = get_seg_entry(sbi, segno);
                enum dirty_type t = sentry->type;

                if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
                        dirty_i->nr_dirty[t]--;

                if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
                        clear_bit(GET_SECNO(sbi, segno),
                                                dirty_i->victim_secmap);
        }
}

/*
 * No error such as -ENOMEM should occur here.
 * Adding a dirty entry to the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned short valid_blocks;

        if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
                return;

        mutex_lock(&dirty_i->seglist_lock);

        valid_blocks = get_valid_blocks(sbi, segno, 0);

        if (valid_blocks == 0) {
                __locate_dirty_segment(sbi, segno, PRE);
                __remove_dirty_segment(sbi, segno, DIRTY);
        } else if (valid_blocks < sbi->blocks_per_seg) {
                __locate_dirty_segment(sbi, segno, DIRTY);
        } else {
                /* Recovery routine with SSR needs this */
                __remove_dirty_segment(sbi, segno, DIRTY);
        }

        mutex_unlock(&dirty_i->seglist_lock);
}

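/*
 * Wrap a prepared discard bio in a discard_cmd in D_PREP state and queue
 * it on dcc->discard_cmd_list for the discard thread to submit.
 */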
static void __add_discard_cmd(struct f2fs_sb_info *sbi,
                        struct bio *bio, block_t lstart, block_t len)
{
        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
        struct list_head *cmd_list = &(dcc->discard_cmd_list);
        struct discard_cmd *dc;

        dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
        INIT_LIST_HEAD(&dc->list);
        dc->bio = bio;
        bio->bi_private = dc;
        dc->lstart = lstart;
        dc->len = len;
        dc->state = D_PREP;
        init_completion(&dc->wait);

        mutex_lock(&dcc->cmd_lock);
        list_add_tail(&dc->list, cmd_list);
        mutex_unlock(&dcc->cmd_lock);
}

static void __remove_discard_cmd(struct f2fs_sb_info *sbi, struct discard_cmd *dc)
{
        int err = dc->bio->bi_error;

        if (dc->state == D_DONE)
                atomic_dec(&(SM_I(sbi)->dcc_info->submit_discard));

        if (err == -EOPNOTSUPP)
                err = 0;

        if (err)
                f2fs_msg(sbi->sb, KERN_INFO,
                                "Issue discard failed, ret: %d", err);
        bio_put(dc->bio);
        list_del(&dc->list);
        kmem_cache_free(discard_cmd_slab, dc);
}

/* This should be covered by global mutex, &sit_i->sentry_lock */
void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
        struct list_head *wait_list = &(dcc->discard_cmd_list);
        struct discard_cmd *dc, *tmp;
        struct blk_plug plug;

        mutex_lock(&dcc->cmd_lock);

        blk_start_plug(&plug);

        list_for_each_entry_safe(dc, tmp, wait_list, list) {

                if (blkaddr == NULL_ADDR) {
                        if (dc->state == D_PREP) {
                                dc->state = D_SUBMIT;
                                submit_bio(dc->bio);
                                atomic_inc(&dcc->submit_discard);
                        }
                        continue;
                }

                if (dc->lstart <= blkaddr && blkaddr < dc->lstart + dc->len) {
                        if (dc->state == D_SUBMIT)
                                wait_for_completion_io(&dc->wait);
                        else
                                __remove_discard_cmd(sbi, dc);
                }
        }
        blk_finish_plug(&plug);

        /* this comes from f2fs_put_super */
        if (blkaddr == NULL_ADDR) {
                list_for_each_entry_safe(dc, tmp, wait_list, list) {
                        wait_for_completion_io(&dc->wait);
                        __remove_discard_cmd(sbi, dc);
                }
        }
        mutex_unlock(&dcc->cmd_lock);
}

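/* bio completion callback: mark the discard command done and wake waiters */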
static void f2fs_submit_discard_endio(struct bio *bio)
{
        struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;

        complete(&dc->wait);
        dc->state = D_DONE;
}

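/*
 * Background discard worker: submit prepared commands in rate-limited
 * batches (DISCARD_ISSUE_RATE per pass) and reap the completed ones.
 */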
static int issue_discard_thread(void *data)
{
        struct f2fs_sb_info *sbi = data;
        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
        wait_queue_head_t *q = &dcc->discard_wait_queue;
        struct list_head *cmd_list = &dcc->discard_cmd_list;
        struct discard_cmd *dc, *tmp;
        struct blk_plug plug;
        int iter = 0;
repeat:
        if (kthread_should_stop())
                return 0;

        blk_start_plug(&plug);

        mutex_lock(&dcc->cmd_lock);
        list_for_each_entry_safe(dc, tmp, cmd_list, list) {
                if (dc->state == D_PREP) {
                        dc->state = D_SUBMIT;
                        submit_bio(dc->bio);
                        atomic_inc(&dcc->submit_discard);
                        if (iter++ > DISCARD_ISSUE_RATE)
                                break;
                } else if (dc->state == D_DONE) {
                        __remove_discard_cmd(sbi, dc);
                }
        }
        mutex_unlock(&dcc->cmd_lock);

        blk_finish_plug(&plug);

        iter = 0;
        congestion_wait(BLK_RW_SYNC, HZ/50);

        wait_event_interruptible(*q,
                kthread_should_stop() || !list_empty(&dcc->discard_cmd_list));
        goto repeat;
}

/* this function is copied from blkdev_issue_discard in block/blk-lib.c */
static int __f2fs_issue_discard_async(struct f2fs_sb_info *sbi,
                struct block_device *bdev, block_t blkstart, block_t blklen)
{
        struct bio *bio = NULL;
        block_t lblkstart = blkstart;
        int err;

        trace_f2fs_issue_discard(bdev, blkstart, blklen);

        if (sbi->s_ndevs) {
                int devi = f2fs_target_device_index(sbi, blkstart);

                blkstart -= FDEV(devi).start_blk;
        }
        err = __blkdev_issue_discard(bdev,
                                SECTOR_FROM_BLOCK(blkstart),
                                SECTOR_FROM_BLOCK(blklen),
                                GFP_NOFS, 0, &bio);
        if (!err && bio) {
                bio->bi_end_io = f2fs_submit_discard_endio;
                bio->bi_opf |= REQ_SYNC;

                __add_discard_cmd(sbi, bio, lblkstart, blklen);
                wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue);
        }
        return err;
}

#ifdef CONFIG_BLK_DEV_ZONED
static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
                struct block_device *bdev, block_t blkstart, block_t blklen)
{
        sector_t sector, nr_sects;
        int devi = 0;

        if (sbi->s_ndevs) {
                devi = f2fs_target_device_index(sbi, blkstart);
                blkstart -= FDEV(devi).start_blk;
        }

        /*
         * We need to know the type of the zone: for conventional zones,
         * use regular discard if the drive supports it. For sequential
         * zones, reset the zone write pointer.
         */
        switch (get_blkz_type(sbi, bdev, blkstart)) {

        case BLK_ZONE_TYPE_CONVENTIONAL:
                if (!blk_queue_discard(bdev_get_queue(bdev)))
                        return 0;
                return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
        case BLK_ZONE_TYPE_SEQWRITE_REQ:
        case BLK_ZONE_TYPE_SEQWRITE_PREF:
                sector = SECTOR_FROM_BLOCK(blkstart);
                nr_sects = SECTOR_FROM_BLOCK(blklen);

                if (sector & (bdev_zone_sectors(bdev) - 1) ||
                                nr_sects != bdev_zone_sectors(bdev)) {
                        f2fs_msg(sbi->sb, KERN_INFO,
                                "(%d) %s: Unaligned discard attempted (block %x + %x)",
                                devi, sbi->s_ndevs ? FDEV(devi).path: "",
                                blkstart, blklen);
                        return -EIO;
                }
                trace_f2fs_issue_reset_zone(bdev, blkstart);
                return blkdev_reset_zones(bdev, sector,
                                          nr_sects, GFP_NOFS);
        default:
                /* Unknown zone type: broken device ? */
                return -EIO;
        }
}
#endif

static int __issue_discard_async(struct f2fs_sb_info *sbi,
                struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
        if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
                                bdev_zoned_model(bdev) != BLK_ZONED_NONE)
                return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
        return __f2fs_issue_discard_async(sbi, bdev, blkstart, blklen);
}

static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
                                block_t blkstart, block_t blklen)
{
        sector_t start = blkstart, len = 0;
        struct block_device *bdev;
        struct seg_entry *se;
        unsigned int offset;
        block_t i;
        int err = 0;

        bdev = f2fs_target_device(sbi, blkstart, NULL);

        for (i = blkstart; i < blkstart + blklen; i++, len++) {
                if (i != start) {
                        struct block_device *bdev2 =
                                f2fs_target_device(sbi, i, NULL);

                        if (bdev2 != bdev) {
                                err = __issue_discard_async(sbi, bdev,
                                                start, len);
                                if (err)
                                        return err;
                                bdev = bdev2;
                                start = i;
                                len = 0;
                        }
                }

                se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
                offset = GET_BLKOFF_FROM_SEG0(sbi, i);

                if (!f2fs_test_and_set_bit(offset, se->discard_map))
                        sbi->discard_blks--;
        }

        if (len)
                err = __issue_discard_async(sbi, bdev, start, len);
        return err;
}

static void __add_discard_entry(struct f2fs_sb_info *sbi,
                struct cp_control *cpc, struct seg_entry *se,
                unsigned int start, unsigned int end)
{
        struct list_head *head = &SM_I(sbi)->dcc_info->discard_entry_list;
        struct discard_entry *new, *last;

        if (!list_empty(head)) {
                last = list_last_entry(head, struct discard_entry, list);
                if (START_BLOCK(sbi, cpc->trim_start) + start ==
                                last->blkaddr + last->len &&
                                last->len < MAX_DISCARD_BLOCKS(sbi)) {
                        last->len += end - start;
                        goto done;
                }
        }

        new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
        INIT_LIST_HEAD(&new->list);
        new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start;
        new->len = end - start;
        list_add_tail(&new->list, head);
done:
        SM_I(sbi)->dcc_info->nr_discards += end - start;
}

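/*
 * Collect the to-be-discarded ranges of the segment at cpc->trim_start by
 * diffing its bitmaps: for FITRIM (CP_DISCARD) everything that is neither
 * checkpointed nor already discarded, otherwise blocks freed since the
 * last checkpoint. Returns true in @check_only mode once a candidate
 * range is found.
 */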
static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
                                                        bool check_only)
{
        int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
        int max_blocks = sbi->blocks_per_seg;
        struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
        unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
        unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
        unsigned long *discard_map = (unsigned long *)se->discard_map;
        unsigned long *dmap = SIT_I(sbi)->tmp_map;
        unsigned int start = 0, end = -1;
        bool force = (cpc->reason == CP_DISCARD);
        int i;

        if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
                return false;

        if (!force) {
                if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
                        SM_I(sbi)->dcc_info->nr_discards >=
                                SM_I(sbi)->dcc_info->max_discards)
                        return false;
        }

        /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
        for (i = 0; i < entries; i++)
                dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
                                (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

        while (force || SM_I(sbi)->dcc_info->nr_discards <=
                                SM_I(sbi)->dcc_info->max_discards) {
                start = __find_rev_next_bit(dmap, max_blocks, end + 1);
                if (start >= max_blocks)
                        break;

                end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
                if (force && start && end != max_blocks
                                        && (end - start) < cpc->trim_minlen)
                        continue;

                if (check_only)
                        return true;

                __add_discard_entry(sbi, cpc, se, start, end);
        }
        return false;
}

void release_discard_addrs(struct f2fs_sb_info *sbi)
{
        struct list_head *head = &(SM_I(sbi)->dcc_info->discard_entry_list);
        struct discard_entry *entry, *this;

        /* drop caches */
        list_for_each_entry_safe(entry, this, head, list) {
                list_del(&entry->list);
                kmem_cache_free(discard_entry_slab, entry);
        }
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned int segno;

        mutex_lock(&dirty_i->seglist_lock);
        for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
                __set_test_and_free(sbi, segno);
        mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
        struct list_head *head = &(SM_I(sbi)->dcc_info->discard_entry_list);
        struct discard_entry *entry, *this;
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
        unsigned int start = 0, end = -1;
        unsigned int secno, start_segno;
        bool force = (cpc->reason == CP_DISCARD);

        mutex_lock(&dirty_i->seglist_lock);

        while (1) {
                int i;
                start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
                if (start >= MAIN_SEGS(sbi))
                        break;
                end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
                                                                start + 1);

                for (i = start; i < end; i++)
                        clear_bit(i, prefree_map);

                dirty_i->nr_dirty[PRE] -= end - start;

                if (!test_opt(sbi, DISCARD))
                        continue;

                if (force && start >= cpc->trim_start &&
                                        (end - 1) <= cpc->trim_end)
                                continue;

                if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
                        f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
                                (end - start) << sbi->log_blocks_per_seg);
                        continue;
                }
next:
                secno = GET_SECNO(sbi, start);
                start_segno = secno * sbi->segs_per_sec;
                if (!IS_CURSEC(sbi, secno) &&
                        !get_valid_blocks(sbi, start, sbi->segs_per_sec))
                        f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
                                sbi->segs_per_sec << sbi->log_blocks_per_seg);

                start = start_segno + sbi->segs_per_sec;
                if (start < end)
                        goto next;
                else
                        end = start - 1;
        }
        mutex_unlock(&dirty_i->seglist_lock);

        /* send small discards */
        list_for_each_entry_safe(entry, this, head, list) {
                if (force && entry->len < cpc->trim_minlen)
                        goto skip;
                f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
                cpc->trimmed += entry->len;
skip:
                list_del(&entry->list);
                SM_I(sbi)->dcc_info->nr_discards -= entry->len;
                kmem_cache_free(discard_entry_slab, entry);
        }
}

static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
{
        dev_t dev = sbi->sb->s_bdev->bd_dev;
        struct discard_cmd_control *dcc;
        int err = 0;

        if (SM_I(sbi)->dcc_info) {
                dcc = SM_I(sbi)->dcc_info;
                goto init_thread;
        }

        dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
        if (!dcc)
                return -ENOMEM;

        INIT_LIST_HEAD(&dcc->discard_entry_list);
        INIT_LIST_HEAD(&dcc->discard_cmd_list);
        mutex_init(&dcc->cmd_lock);
        atomic_set(&dcc->submit_discard, 0);
        dcc->nr_discards = 0;
        dcc->max_discards = 0;

        init_waitqueue_head(&dcc->discard_wait_queue);
        SM_I(sbi)->dcc_info = dcc;
init_thread:
        dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
                                "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(dcc->f2fs_issue_discard)) {
                err = PTR_ERR(dcc->f2fs_issue_discard);
                kfree(dcc);
                SM_I(sbi)->dcc_info = NULL;
                return err;
        }

        return err;
}

static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
        struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

        if (dcc && dcc->f2fs_issue_discard) {
                struct task_struct *discard_thread = dcc->f2fs_issue_discard;

                dcc->f2fs_issue_discard = NULL;
                kthread_stop(discard_thread);
        }
        if (free) {
                kfree(dcc);
                SM_I(sbi)->dcc_info = NULL;
        }
}

static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);

        if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
                sit_i->dirty_sentries++;
                return false;
        }

        return true;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
                                        unsigned int segno, int modified)
{
        struct seg_entry *se = get_seg_entry(sbi, segno);
        se->type = type;
        if (modified)
                __mark_sit_entry_dirty(sbi, segno);
}

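/*
 * Apply a validity change of @del blocks at @blkaddr to the segment's SIT
 * entry: adjust the valid-block count, flip the cur_valid_map bit for the
 * block, keep the discard map and checkpoint counters consistent, and
 * mark the entry dirty.
 */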
static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
        struct seg_entry *se;
        unsigned int segno, offset;
        long int new_vblocks;

        segno = GET_SEGNO(sbi, blkaddr);

        se = get_seg_entry(sbi, segno);
        new_vblocks = se->valid_blocks + del;
        offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

        f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
                                (new_vblocks > sbi->blocks_per_seg)));

        se->valid_blocks = new_vblocks;
        se->mtime = get_mtime(sbi);
        SIT_I(sbi)->max_mtime = se->mtime;

        /* Update valid block bitmap */
        if (del > 0) {
                if (f2fs_test_and_set_bit(offset, se->cur_valid_map)) {
#ifdef CONFIG_F2FS_CHECK_FS
                        if (f2fs_test_and_set_bit(offset,
                                                se->cur_valid_map_mir))
                                f2fs_bug_on(sbi, 1);
                        else
                                WARN_ON(1);
#else
                        f2fs_bug_on(sbi, 1);
#endif
                }
                if (f2fs_discard_en(sbi) &&
                        !f2fs_test_and_set_bit(offset, se->discard_map))
                        sbi->discard_blks--;

                /* don't overwrite by SSR to keep node chain */
                if (se->type == CURSEG_WARM_NODE) {
                        if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
                                se->ckpt_valid_blocks++;
                }
        } else {
                if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) {
#ifdef CONFIG_F2FS_CHECK_FS
                        if (!f2fs_test_and_clear_bit(offset,
                                                se->cur_valid_map_mir))
                                f2fs_bug_on(sbi, 1);
                        else
                                WARN_ON(1);
#else
                        f2fs_bug_on(sbi, 1);
#endif
                }
                if (f2fs_discard_en(sbi) &&
                        f2fs_test_and_clear_bit(offset, se->discard_map))
                        sbi->discard_blks++;
        }
        if (!f2fs_test_bit(offset, se->ckpt_valid_map))
                se->ckpt_valid_blocks += del;

        __mark_sit_entry_dirty(sbi, segno);

        /* update total number of valid blocks to be written in ckpt area */
        SIT_I(sbi)->written_valid_blocks += del;

        if (sbi->segs_per_sec > 1)
                get_sec_entry(sbi, segno)->valid_blocks += del;
}

void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
{
        update_sit_entry(sbi, new, 1);
        if (GET_SEGNO(sbi, old) != NULL_SEGNO)
                update_sit_entry(sbi, old, -1);

        locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
        locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
}

void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
        unsigned int segno = GET_SEGNO(sbi, addr);
        struct sit_info *sit_i = SIT_I(sbi);

        f2fs_bug_on(sbi, addr == NULL_ADDR);
        if (addr == NEW_ADDR)
                return;

        /* add it into sit main buffer */
        mutex_lock(&sit_i->sentry_lock);

        update_sit_entry(sbi, addr, -1);

        /* add it into dirty seglist */
        locate_dirty_segment(sbi, segno);

        mutex_unlock(&sit_i->sentry_lock);
}

bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int segno, offset;
        struct seg_entry *se;
        bool is_cp = false;

        if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
                return true;

        mutex_lock(&sit_i->sentry_lock);

        segno = GET_SEGNO(sbi, blkaddr);
        se = get_seg_entry(sbi, segno);
        offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

        if (f2fs_test_bit(offset, se->ckpt_valid_map))
                is_cp = true;

        mutex_unlock(&sit_i->sentry_lock);

        return is_cp;
}

/*
 * This function must be called under the curseg_mutex lock.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
                                        struct f2fs_summary *sum)
{
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        void *addr = curseg->sum_blk;
        addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
        memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
        int valid_sum_count = 0;
        int i, sum_in_page;

        for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
                if (sbi->ckpt->alloc_type[i] == SSR)
                        valid_sum_count += sbi->blocks_per_seg;
                else {
                        if (for_ra)
                                valid_sum_count += le16_to_cpu(
                                        F2FS_CKPT(sbi)->cur_data_blkoff[i]);
                        else
                                valid_sum_count += curseg_blkoff(sbi, i);
                }
        }

        sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
                        SUM_FOOTER_SIZE) / SUMMARY_SIZE;
        if (valid_sum_count <= sum_in_page)
                return 1;
        else if ((valid_sum_count - sum_in_page) <=
                (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
                return 2;
        return 3;
}

/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
        return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
{
        struct page *page = grab_meta_page(sbi, blk_addr);
        void *dst = page_address(page);

        if (src)
                memcpy(dst, src, PAGE_SIZE);
        else
                memset(dst, 0, PAGE_SIZE);
        set_page_dirty(page);
        f2fs_put_page(page, 1);
}

static void write_sum_page(struct f2fs_sb_info *sbi,
                        struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
        update_meta_page(sbi, (void *)sum_blk, blk_addr);
}

static void write_current_sum_page(struct f2fs_sb_info *sbi,
                                                int type, block_t blk_addr)
{
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        struct page *page = grab_meta_page(sbi, blk_addr);
        struct f2fs_summary_block *src = curseg->sum_blk;
        struct f2fs_summary_block *dst;

        dst = (struct f2fs_summary_block *)page_address(page);

        mutex_lock(&curseg->curseg_mutex);

        down_read(&curseg->journal_rwsem);
        memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
        up_read(&curseg->journal_rwsem);

        memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
        memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);

        mutex_unlock(&curseg->curseg_mutex);

        set_page_dirty(page);
        f2fs_put_page(page, 1);
}

/*
 * Find a new segment from the free segments bitmap, in the right order.
 * This function must succeed; otherwise it is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
                        unsigned int *newseg, bool new_sec, int dir)
{
        struct free_segmap_info *free_i = FREE_I(sbi);
        unsigned int segno, secno, zoneno;
        unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
        unsigned int hint = *newseg / sbi->segs_per_sec;
        unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
        unsigned int left_start = hint;
        bool init = true;
        int go_left = 0;
        int i;

        spin_lock(&free_i->segmap_lock);

        if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
                segno = find_next_zero_bit(free_i->free_segmap,
                                (hint + 1) * sbi->segs_per_sec, *newseg + 1);
                if (segno < (hint + 1) * sbi->segs_per_sec)
                        goto got_it;
        }
find_other_zone:
        secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
        if (secno >= MAIN_SECS(sbi)) {
                if (dir == ALLOC_RIGHT) {
                        secno = find_next_zero_bit(free_i->free_secmap,
                                                        MAIN_SECS(sbi), 0);
                        f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
                } else {
                        go_left = 1;
                        left_start = hint - 1;
                }
        }
        if (go_left == 0)
                goto skip_left;

        while (test_bit(left_start, free_i->free_secmap)) {
                if (left_start > 0) {
                        left_start--;
                        continue;
                }
                left_start = find_next_zero_bit(free_i->free_secmap,
                                                        MAIN_SECS(sbi), 0);
                f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
                break;
        }
        secno = left_start;
skip_left:
        hint = secno;
        segno = secno * sbi->segs_per_sec;
        zoneno = secno / sbi->secs_per_zone;

        /* give up on finding another zone */
        if (!init)
                goto got_it;
        if (sbi->secs_per_zone == 1)
                goto got_it;
        if (zoneno == old_zoneno)
                goto got_it;
        if (dir == ALLOC_LEFT) {
                if (!go_left && zoneno + 1 >= total_zones)
                        goto got_it;
                if (go_left && zoneno == 0)
                        goto got_it;
        }
        for (i = 0; i < NR_CURSEG_TYPE; i++)
                if (CURSEG_I(sbi, i)->zone == zoneno)
                        break;

        if (i < NR_CURSEG_TYPE) {
                /* zone is in use, try another */
                if (go_left)
                        hint = zoneno * sbi->secs_per_zone - 1;
                else if (zoneno + 1 >= total_zones)
                        hint = 0;
                else
                        hint = (zoneno + 1) * sbi->secs_per_zone;
                init = false;
                goto find_other_zone;
        }
got_it:
        /* set it as dirty segment in free segmap */
        f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
        __set_inuse(sbi, segno);
        *newseg = segno;
        spin_unlock(&free_i->segmap_lock);
}

static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        struct summary_footer *sum_footer;

        curseg->segno = curseg->next_segno;
        curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
        curseg->next_blkoff = 0;
        curseg->next_segno = NULL_SEGNO;

        sum_footer = &(curseg->sum_blk->footer);
        memset(sum_footer, 0, sizeof(struct summary_footer));
        if (IS_DATASEG(type))
                SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
        if (IS_NODESEG(type))
                SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
        __set_sit_entry_type(sbi, type, curseg->segno, modified);
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
        struct curseg_info *curseg = CURSEG_I(sbi, type);
        unsigned int segno = curseg->segno;
        int dir = ALLOC_LEFT;

        write_sum_page(sbi, curseg->sum_blk,
                                GET_SUM_BLOCK(sbi, segno));
        if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
                dir = ALLOC_RIGHT;

        if (test_opt(sbi, NOHEAP))
                dir = ALLOC_RIGHT;

        get_new_segment(sbi, &segno, new_sec, dir);
        curseg->next_segno = segno;
        reset_curseg(sbi, type, 1);
        curseg->alloc_type = LFS;
}

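/*
 * Find the next free block offset in a segment for SSR allocation by
 * OR-ing the checkpoint and current validity bitmaps and scanning for
 * the first zero bit at or after @start.
 */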
static void __next_free_blkoff(struct f2fs_sb_info *sbi,
                        struct curseg_info *seg, block_t start)
{
        struct seg_entry *se = get_seg_entry(sbi, seg->segno);
        int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
        unsigned long *target_map = SIT_I(sbi)->tmp_map;
        unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
        unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
        int i, pos;

        for (i = 0; i < entries; i++)
                target_map[i] = ckpt_map[i] | cur_map[i];

        pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

        seg->next_blkoff = pos;
}

1501 /*
1502  * If a segment is written by LFS manner, next block offset is just obtained
1503  * by increasing the current block offset. However, if a segment is written by
1504  * SSR manner, next block offset obtained by calling __next_free_blkoff
1505  */
1506 static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
1507                                 struct curseg_info *seg)
1508 {
1509         if (seg->alloc_type == SSR)
1510                 __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
1511         else
1512                 seg->next_blkoff++;
1513 }
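
/*
 * Illustrative sketch, not part of f2fs: a simplified, hypothetical model of
 * the SSR hole-filling above. The real code ORs cur_valid_map and
 * ckpt_valid_map into tmp_map and scans it with __find_rev_next_zero_bit(),
 * which honours the MSB-first per-byte bit order used by f2fs_set_bit();
 * this standalone helper only demonstrates the "first unused slot at or
 * after @start" idea on such a bitmap.
 */
#if 0
static unsigned int next_free_slot_example(const unsigned char *valid_map,
				unsigned int nbits, unsigned int start)
{
	unsigned int i;

	for (i = start; i < nbits; i++)
		if (!(valid_map[i >> 3] & (0x80 >> (i & 7))))
			return i;	/* first hole at or after @start */
	return nbits;			/* no free slot: segment is full */
}
#endif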
1514
1515 /*
1516  * This function always allocates a used segment (from the dirty seglist) in
1517  * SSR manner, so it should recover the existing segment information of valid blocks.
1518  */
1519 static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
1520 {
1521         struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
1522         struct curseg_info *curseg = CURSEG_I(sbi, type);
1523         unsigned int new_segno = curseg->next_segno;
1524         struct f2fs_summary_block *sum_node;
1525         struct page *sum_page;
1526
1527         write_sum_page(sbi, curseg->sum_blk,
1528                                 GET_SUM_BLOCK(sbi, curseg->segno));
1529         __set_test_and_inuse(sbi, new_segno);
1530
1531         mutex_lock(&dirty_i->seglist_lock);
1532         __remove_dirty_segment(sbi, new_segno, PRE);
1533         __remove_dirty_segment(sbi, new_segno, DIRTY);
1534         mutex_unlock(&dirty_i->seglist_lock);
1535
1536         reset_curseg(sbi, type, 1);
1537         curseg->alloc_type = SSR;
1538         __next_free_blkoff(sbi, curseg, 0);
1539
1540         if (reuse) {
1541                 sum_page = get_sum_page(sbi, new_segno);
1542                 sum_node = (struct f2fs_summary_block *)page_address(sum_page);
1543                 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
1544                 f2fs_put_page(sum_page, 1);
1545         }
1546 }
1547
1548 static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
1549 {
1550         struct curseg_info *curseg = CURSEG_I(sbi, type);
1551         const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
1552         int i, cnt;
1553         bool reversed = false;
1554
1555         /* need_SSR() has already forced us to do this */
1556         if (v_ops->get_victim(sbi, &(curseg)->next_segno, BG_GC, type, SSR))
1557                 return 1;
1558
1559         /* For node segments, let's do SSR more intensively */
1560         if (IS_NODESEG(type)) {
1561                 if (type >= CURSEG_WARM_NODE) {
1562                         reversed = true;
1563                         i = CURSEG_COLD_NODE;
1564                 } else {
1565                         i = CURSEG_HOT_NODE;
1566                 }
1567                 cnt = NR_CURSEG_NODE_TYPE;
1568         } else {
1569                 if (type >= CURSEG_WARM_DATA) {
1570                         reversed = true;
1571                         i = CURSEG_COLD_DATA;
1572                 } else {
1573                         i = CURSEG_HOT_DATA;
1574                 }
1575                 cnt = NR_CURSEG_DATA_TYPE;
1576         }
1577
1578         for (; cnt-- > 0; reversed ? i-- : i++) {
1579                 if (i == type)
1580                         continue;
1581                 if (v_ops->get_victim(sbi, &(curseg)->next_segno,
1582                                                 BG_GC, i, SSR))
1583                         return 1;
1584         }
1585         return 0;
1586 }
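
/*
 * Illustrative sketch, not part of f2fs: a hypothetical helper recording the
 * probe order generated by the loop above. Assuming the usual enum order
 * HOT < WARM < COLD within the data and node groups, CURSEG_WARM_DATA probes
 * { COLD_DATA, HOT_DATA } (reversed walk, skipping itself), while
 * CURSEG_HOT_DATA probes { WARM_DATA, COLD_DATA }.
 */
#if 0
static int ssr_probe_order_example(int type, int first, int cnt,
					bool reversed, int *order)
{
	int i = first, n = 0;

	for (; cnt-- > 0; reversed ? i-- : i++) {
		if (i == type)
			continue;
		order[n++] = i;
	}
	return n;
}
#endif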
1587
1588 /*
1589  * Flush out the current segment and replace it with a new one.
1590  * This function must return with success; otherwise it is a BUG.
1591  */
1592 static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
1593                                                 int type, bool force)
1594 {
1595         if (force)
1596                 new_curseg(sbi, type, true);
1597         else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
1598                                         type == CURSEG_WARM_NODE)
1599                 new_curseg(sbi, type, false);
1600         else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
1601                 change_curseg(sbi, type, true);
1602         else
1603                 new_curseg(sbi, type, false);
1604
1605         stat_inc_seg_type(sbi, CURSEG_I(sbi, type));
1606 }
1607
1608 void allocate_new_segments(struct f2fs_sb_info *sbi)
1609 {
1610         struct curseg_info *curseg;
1611         unsigned int old_segno;
1612         int i;
1613
1614         for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1615                 curseg = CURSEG_I(sbi, i);
1616                 old_segno = curseg->segno;
1617                 SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
1618                 locate_dirty_segment(sbi, old_segno);
1619         }
1620 }
1621
1622 static const struct segment_allocation default_salloc_ops = {
1623         .allocate_segment = allocate_segment_by_default,
1624 };
1625
1626 bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1627 {
1628         __u64 trim_start = cpc->trim_start;
1629         bool has_candidate = false;
1630
1631         mutex_lock(&SIT_I(sbi)->sentry_lock);
1632         for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
1633                 if (add_discard_addrs(sbi, cpc, true)) {
1634                         has_candidate = true;
1635                         break;
1636                 }
1637         }
1638         mutex_unlock(&SIT_I(sbi)->sentry_lock);
1639
1640         cpc->trim_start = trim_start;
1641         return has_candidate;
1642 }
1643
1644 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
1645 {
1646         __u64 start = F2FS_BYTES_TO_BLK(range->start);
1647         __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
1648         unsigned int start_segno, end_segno;
1649         struct cp_control cpc;
1650         int err = 0;
1651
1652         if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
1653                 return -EINVAL;
1654
1655         cpc.trimmed = 0;
1656         if (end <= MAIN_BLKADDR(sbi))
1657                 goto out;
1658
1659         if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
1660                 f2fs_msg(sbi->sb, KERN_WARNING,
1661                         "Found FS corruption, run fsck to fix.");
1662                 goto out;
1663         }
1664
1665         /* start/end segment number in main_area */
1666         start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
1667         end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
1668                                                 GET_SEGNO(sbi, end);
1669         cpc.reason = CP_DISCARD;
1670         cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
1671
1672         /* do checkpoint to issue discard commands safely */
1673         for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
1674                 cpc.trim_start = start_segno;
1675
1676                 if (sbi->discard_blks == 0)
1677                         break;
1678                 else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
1679                         cpc.trim_end = end_segno;
1680                 else
1681                         cpc.trim_end = min_t(unsigned int,
1682                                 rounddown(start_segno +
1683                                 BATCHED_TRIM_SEGMENTS(sbi),
1684                                 sbi->segs_per_sec) - 1, end_segno);
1685
1686                 mutex_lock(&sbi->gc_mutex);
1687                 err = write_checkpoint(sbi, &cpc);
1688                 mutex_unlock(&sbi->gc_mutex);
1689                 if (err)
1690                         break;
1691
1692                 schedule();
1693         }
1694 out:
1695         range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
1696         return err;
1697 }
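
/*
 * Illustrative arithmetic, not part of f2fs: how the loop above clips each
 * trim batch, extracted into a hypothetical helper. With example values
 * segs_per_sec = 4 and BATCHED_TRIM_SEGMENTS(sbi) = 8, a batch starting at
 * segment 10 ends at rounddown(10 + 8, 4) - 1 = 15, so each checkpoint
 * discards whole sections and the next batch starts on a section boundary.
 */
#if 0
static unsigned int trim_batch_end_example(unsigned int start_segno,
			unsigned int batch_segs, unsigned int segs_per_sec,
			unsigned int end_segno)
{
	unsigned int end = (start_segno + batch_segs) / segs_per_sec
						* segs_per_sec - 1;

	return end < end_segno ? end : end_segno;
}
#endif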
1698
1699 static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
1700 {
1701         struct curseg_info *curseg = CURSEG_I(sbi, type);
1702         if (curseg->next_blkoff < sbi->blocks_per_seg)
1703                 return true;
1704         return false;
1705 }
1706
1707 static int __get_segment_type_2(struct page *page, enum page_type p_type)
1708 {
1709         if (p_type == DATA)
1710                 return CURSEG_HOT_DATA;
1711         else
1712                 return CURSEG_HOT_NODE;
1713 }
1714
1715 static int __get_segment_type_4(struct page *page, enum page_type p_type)
1716 {
1717         if (p_type == DATA) {
1718                 struct inode *inode = page->mapping->host;
1719
1720                 if (S_ISDIR(inode->i_mode))
1721                         return CURSEG_HOT_DATA;
1722                 else
1723                         return CURSEG_COLD_DATA;
1724         } else {
1725                 if (IS_DNODE(page) && is_cold_node(page))
1726                         return CURSEG_WARM_NODE;
1727                 else
1728                         return CURSEG_COLD_NODE;
1729         }
1730 }
1731
1732 static int __get_segment_type_6(struct page *page, enum page_type p_type)
1733 {
1734         if (p_type == DATA) {
1735                 struct inode *inode = page->mapping->host;
1736
1737                 if (S_ISDIR(inode->i_mode))
1738                         return CURSEG_HOT_DATA;
1739                 else if (is_cold_data(page) || file_is_cold(inode))
1740                         return CURSEG_COLD_DATA;
1741                 else
1742                         return CURSEG_WARM_DATA;
1743         } else {
1744                 if (IS_DNODE(page))
1745                         return is_cold_node(page) ? CURSEG_WARM_NODE :
1746                                                 CURSEG_HOT_NODE;
1747                 else
1748                         return CURSEG_COLD_NODE;
1749         }
1750 }
1751
1752 static int __get_segment_type(struct page *page, enum page_type p_type)
1753 {
1754         switch (F2FS_P_SB(page)->active_logs) {
1755         case 2:
1756                 return __get_segment_type_2(page, p_type);
1757         case 4:
1758                 return __get_segment_type_4(page, p_type);
1759         }
1760         /* NR_CURSEG_TYPE(6) logs by default */
1761         f2fs_bug_on(F2FS_P_SB(page),
1762                 F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE);
1763         return __get_segment_type_6(page, p_type);
1764 }
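
/*
 * Illustrative summary, not part of f2fs: the six-log policy above as one
 * decision table, with the page-state predicates stubbed out as booleans in
 * a hypothetical helper. Directory data is hot, cold-marked data (or data
 * of a cold file) is cold, other data is warm; direct nodes of cold files
 * are warm, other direct nodes are hot, and indirect nodes are cold.
 */
#if 0
static int seg_type_6_example(bool is_data, bool is_dir, bool cold_data,
				bool is_dnode, bool cold_node)
{
	if (is_data)
		return is_dir ? CURSEG_HOT_DATA :
			cold_data ? CURSEG_COLD_DATA : CURSEG_WARM_DATA;
	return is_dnode ? (cold_node ? CURSEG_WARM_NODE : CURSEG_HOT_NODE) :
			CURSEG_COLD_NODE;
}
#endif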
1765
1766 void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
1767                 block_t old_blkaddr, block_t *new_blkaddr,
1768                 struct f2fs_summary *sum, int type)
1769 {
1770         struct sit_info *sit_i = SIT_I(sbi);
1771         struct curseg_info *curseg = CURSEG_I(sbi, type);
1772
1773         mutex_lock(&curseg->curseg_mutex);
1774         mutex_lock(&sit_i->sentry_lock);
1775
1776         *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
1777
1778         f2fs_wait_discard_bio(sbi, *new_blkaddr);
1779
1780         /*
1781          * __add_sum_entry must be called under the curseg_mutex,
1782          * because this function updates a summary entry in the
1783          * current summary block.
1784          */
1785         __add_sum_entry(sbi, type, sum);
1786
1787         __refresh_next_blkoff(sbi, curseg);
1788
1789         stat_inc_block_count(sbi, curseg);
1790
1791         /*
1792          * SIT information should be updated before segment allocation,
1793          * since SSR needs the latest valid block information.
1794          */
1795         refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);
1796
1797         if (!__has_curseg_space(sbi, type))
1798                 sit_i->s_ops->allocate_segment(sbi, type, false);
1799
1800         mutex_unlock(&sit_i->sentry_lock);
1801
1802         if (page && IS_NODESEG(type))
1803                 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
1804
1805         mutex_unlock(&curseg->curseg_mutex);
1806 }
1807
1808 static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
1809 {
1810         int type = __get_segment_type(fio->page, fio->type);
1811         int err;
1812
1813         if (fio->type == NODE || fio->type == DATA)
1814                 mutex_lock(&fio->sbi->wio_mutex[fio->type]);
1815 reallocate:
1816         allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
1817                                         &fio->new_blkaddr, sum, type);
1818
1819         /* write out the dirty page to the block device */
1820         err = f2fs_submit_page_mbio(fio);
1821         if (err == -EAGAIN) {
1822                 fio->old_blkaddr = fio->new_blkaddr;
1823                 goto reallocate;
1824         }
1825
1826         if (fio->type == NODE || fio->type == DATA)
1827                 mutex_unlock(&fio->sbi->wio_mutex[fio->type]);
1828 }
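
/*
 * Illustrative sketch, not part of f2fs: the reallocate loop above rewritten
 * as a do/while, to make the retry semantics explicit. If
 * f2fs_submit_page_mbio() fails with -EAGAIN, the just-allocated address is
 * treated as the old one and a fresh block is allocated before retrying.
 */
#if 0
	do {
		allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
					&fio->new_blkaddr, sum, type);
		err = f2fs_submit_page_mbio(fio);
		if (err == -EAGAIN)
			fio->old_blkaddr = fio->new_blkaddr;
	} while (err == -EAGAIN);
#endif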
1829
1830 void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
1831 {
1832         struct f2fs_io_info fio = {
1833                 .sbi = sbi,
1834                 .type = META,
1835                 .op = REQ_OP_WRITE,
1836                 .op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
1837                 .old_blkaddr = page->index,
1838                 .new_blkaddr = page->index,
1839                 .page = page,
1840                 .encrypted_page = NULL,
1841         };
1842
1843         if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
1844                 fio.op_flags &= ~REQ_META;
1845
1846         set_page_writeback(page);
1847         f2fs_submit_page_mbio(&fio);
1848 }
1849
1850 void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
1851 {
1852         struct f2fs_summary sum;
1853
1854         set_summary(&sum, nid, 0, 0);
1855         do_write_page(&sum, fio);
1856 }
1857
1858 void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
1859 {
1860         struct f2fs_sb_info *sbi = fio->sbi;
1861         struct f2fs_summary sum;
1862         struct node_info ni;
1863
1864         f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
1865         get_node_info(sbi, dn->nid, &ni);
1866         set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
1867         do_write_page(&sum, fio);
1868         f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
1869 }
1870
1871 void rewrite_data_page(struct f2fs_io_info *fio)
1872 {
1873         fio->new_blkaddr = fio->old_blkaddr;
1874         stat_inc_inplace_blocks(fio->sbi);
1875         f2fs_submit_page_mbio(fio);
1876 }
1877
1878 void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1879                                 block_t old_blkaddr, block_t new_blkaddr,
1880                                 bool recover_curseg, bool recover_newaddr)
1881 {
1882         struct sit_info *sit_i = SIT_I(sbi);
1883         struct curseg_info *curseg;
1884         unsigned int segno, old_cursegno;
1885         struct seg_entry *se;
1886         int type;
1887         unsigned short old_blkoff;
1888
1889         segno = GET_SEGNO(sbi, new_blkaddr);
1890         se = get_seg_entry(sbi, segno);
1891         type = se->type;
1892
1893         if (!recover_curseg) {
1894                 /* for recovery flow */
1895                 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
1896                         if (old_blkaddr == NULL_ADDR)
1897                                 type = CURSEG_COLD_DATA;
1898                         else
1899                                 type = CURSEG_WARM_DATA;
1900                 }
1901         } else {
1902                 if (!IS_CURSEG(sbi, segno))
1903                         type = CURSEG_WARM_DATA;
1904         }
1905
1906         curseg = CURSEG_I(sbi, type);
1907
1908         mutex_lock(&curseg->curseg_mutex);
1909         mutex_lock(&sit_i->sentry_lock);
1910
1911         old_cursegno = curseg->segno;
1912         old_blkoff = curseg->next_blkoff;
1913
1914         /* change the current segment */
1915         if (segno != curseg->segno) {
1916                 curseg->next_segno = segno;
1917                 change_curseg(sbi, type, true);
1918         }
1919
1920         curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
1921         __add_sum_entry(sbi, type, sum);
1922
1923         if (!recover_curseg || recover_newaddr)
1924                 update_sit_entry(sbi, new_blkaddr, 1);
1925         if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
1926                 update_sit_entry(sbi, old_blkaddr, -1);
1927
1928         locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
1929         locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
1930
1931         locate_dirty_segment(sbi, old_cursegno);
1932
1933         if (recover_curseg) {
1934                 if (old_cursegno != curseg->segno) {
1935                         curseg->next_segno = old_cursegno;
1936                         change_curseg(sbi, type, true);
1937                 }
1938                 curseg->next_blkoff = old_blkoff;
1939         }
1940
1941         mutex_unlock(&sit_i->sentry_lock);
1942         mutex_unlock(&curseg->curseg_mutex);
1943 }
1944
1945 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
1946                                 block_t old_addr, block_t new_addr,
1947                                 unsigned char version, bool recover_curseg,
1948                                 bool recover_newaddr)
1949 {
1950         struct f2fs_summary sum;
1951
1952         set_summary(&sum, dn->nid, dn->ofs_in_node, version);
1953
1954         __f2fs_replace_block(sbi, &sum, old_addr, new_addr,
1955                                         recover_curseg, recover_newaddr);
1956
1957         f2fs_update_data_blkaddr(dn, new_addr);
1958 }
1959
1960 void f2fs_wait_on_page_writeback(struct page *page,
1961                                 enum page_type type, bool ordered)
1962 {
1963         if (PageWriteback(page)) {
1964                 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1965
1966                 f2fs_submit_merged_bio_cond(sbi, page->mapping->host,
1967                                                 0, page->index, type, WRITE);
1968                 if (ordered)
1969                         wait_on_page_writeback(page);
1970                 else
1971                         wait_for_stable_page(page);
1972         }
1973 }
1974
1975 void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
1976                                                         block_t blkaddr)
1977 {
1978         struct page *cpage;
1979
1980         if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
1981                 return;
1982
1983         cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
1984         if (cpage) {
1985                 f2fs_wait_on_page_writeback(cpage, DATA, true);
1986                 f2fs_put_page(cpage, 1);
1987         }
1988 }
1989
1990 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
1991 {
1992         struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1993         struct curseg_info *seg_i;
1994         unsigned char *kaddr;
1995         struct page *page;
1996         block_t start;
1997         int i, j, offset;
1998
1999         start = start_sum_block(sbi);
2000
2001         page = get_meta_page(sbi, start++);
2002         kaddr = (unsigned char *)page_address(page);
2003
2004         /* Step 1: restore nat cache */
2005         seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
2006         memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
2007
2008         /* Step 2: restore sit cache */
2009         seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
2010         memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
2011         offset = 2 * SUM_JOURNAL_SIZE;
2012
2013         /* Step 3: restore summary entries */
2014         for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2015                 unsigned short blk_off;
2016                 unsigned int segno;
2017
2018                 seg_i = CURSEG_I(sbi, i);
2019                 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
2020                 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
2021                 seg_i->next_segno = segno;
2022                 reset_curseg(sbi, i, 0);
2023                 seg_i->alloc_type = ckpt->alloc_type[i];
2024                 seg_i->next_blkoff = blk_off;
2025
2026                 if (seg_i->alloc_type == SSR)
2027                         blk_off = sbi->blocks_per_seg;
2028
2029                 for (j = 0; j < blk_off; j++) {
2030                         struct f2fs_summary *s;
2031                         s = (struct f2fs_summary *)(kaddr + offset);
2032                         seg_i->sum_blk->entries[j] = *s;
2033                         offset += SUMMARY_SIZE;
2034                         if (offset + SUMMARY_SIZE <= PAGE_SIZE -
2035                                                 SUM_FOOTER_SIZE)
2036                                 continue;
2037
2038                         f2fs_put_page(page, 1);
2039                         page = NULL;
2040
2041                         page = get_meta_page(sbi, start++);
2042                         kaddr = (unsigned char *)page_address(page);
2043                         offset = 0;
2044                 }
2045         }
2046         f2fs_put_page(page, 1);
2047         return 0;
2048 }
2049
2050 static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
2051 {
2052         struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2053         struct f2fs_summary_block *sum;
2054         struct curseg_info *curseg;
2055         struct page *new;
2056         unsigned short blk_off;
2057         unsigned int segno = 0;
2058         block_t blk_addr = 0;
2059
2060         /* get segment number and block addr */
2061         if (IS_DATASEG(type)) {
2062                 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
2063                 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
2064                                                         CURSEG_HOT_DATA]);
2065                 if (__exist_node_summaries(sbi))
2066                         blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
2067                 else
2068                         blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
2069         } else {
2070                 segno = le32_to_cpu(ckpt->cur_node_segno[type -
2071                                                         CURSEG_HOT_NODE]);
2072                 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
2073                                                         CURSEG_HOT_NODE]);
2074                 if (__exist_node_summaries(sbi))
2075                         blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
2076                                                         type - CURSEG_HOT_NODE);
2077                 else
2078                         blk_addr = GET_SUM_BLOCK(sbi, segno);
2079         }
2080
2081         new = get_meta_page(sbi, blk_addr);
2082         sum = (struct f2fs_summary_block *)page_address(new);
2083
2084         if (IS_NODESEG(type)) {
2085                 if (__exist_node_summaries(sbi)) {
2086                         struct f2fs_summary *ns = &sum->entries[0];
2087                         int i;
2088                         for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
2089                                 ns->version = 0;
2090                                 ns->ofs_in_node = 0;
2091                         }
2092                 } else {
2093                         int err;
2094
2095                         err = restore_node_summary(sbi, segno, sum);
2096                         if (err) {
2097                                 f2fs_put_page(new, 1);
2098                                 return err;
2099                         }
2100                 }
2101         }
2102
2103         /* set the uncompleted segment as curseg */
2104         curseg = CURSEG_I(sbi, type);
2105         mutex_lock(&curseg->curseg_mutex);
2106
2107         /* update journal info */
2108         down_write(&curseg->journal_rwsem);
2109         memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
2110         up_write(&curseg->journal_rwsem);
2111
2112         memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
2113         memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
2114         curseg->next_segno = segno;
2115         reset_curseg(sbi, type, 0);
2116         curseg->alloc_type = ckpt->alloc_type[type];
2117         curseg->next_blkoff = blk_off;
2118         mutex_unlock(&curseg->curseg_mutex);
2119         f2fs_put_page(new, 1);
2120         return 0;
2121 }
2122
2123 static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
2124 {
2125         int type = CURSEG_HOT_DATA;
2126         int err;
2127
2128         if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
2129                 int npages = npages_for_summary_flush(sbi, true);
2130
2131                 if (npages >= 2)
2132                         ra_meta_pages(sbi, start_sum_block(sbi), npages,
2133                                                         META_CP, true);
2134
2135                 /* restore for compacted data summary */
2136                 if (read_compacted_summaries(sbi))
2137                         return -EINVAL;
2138                 type = CURSEG_HOT_NODE;
2139         }
2140
2141         if (__exist_node_summaries(sbi))
2142                 ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
2143                                         NR_CURSEG_TYPE - type, META_CP, true);
2144
2145         for (; type <= CURSEG_COLD_NODE; type++) {
2146                 err = read_normal_summaries(sbi, type);
2147                 if (err)
2148                         return err;
2149         }
2150
2151         return 0;
2152 }
2153
2154 static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
2155 {
2156         struct page *page;
2157         unsigned char *kaddr;
2158         struct f2fs_summary *summary;
2159         struct curseg_info *seg_i;
2160         int written_size = 0;
2161         int i, j;
2162
2163         page = grab_meta_page(sbi, blkaddr++);
2164         kaddr = (unsigned char *)page_address(page);
2165
2166         /* Step 1: write nat cache */
2167         seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
2168         memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
2169         written_size += SUM_JOURNAL_SIZE;
2170
2171         /* Step 2: write sit cache */
2172         seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
2173         memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
2174         written_size += SUM_JOURNAL_SIZE;
2175
2176         /* Step 3: write summary entries */
2177         for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2178                 unsigned short blkoff;
2179                 seg_i = CURSEG_I(sbi, i);
2180                 if (sbi->ckpt->alloc_type[i] == SSR)
2181                         blkoff = sbi->blocks_per_seg;
2182                 else
2183                         blkoff = curseg_blkoff(sbi, i);
2184
2185                 for (j = 0; j < blkoff; j++) {
2186                         if (!page) {
2187                                 page = grab_meta_page(sbi, blkaddr++);
2188                                 kaddr = (unsigned char *)page_address(page);
2189                                 written_size = 0;
2190                         }
2191                         summary = (struct f2fs_summary *)(kaddr + written_size);
2192                         *summary = seg_i->sum_blk->entries[j];
2193                         written_size += SUMMARY_SIZE;
2194
2195                         if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
2196                                                         SUM_FOOTER_SIZE)
2197                                 continue;
2198
2199                         set_page_dirty(page);
2200                         f2fs_put_page(page, 1);
2201                         page = NULL;
2202                 }
2203         }
2204         if (page) {
2205                 set_page_dirty(page);
2206                 f2fs_put_page(page, 1);
2207         }
2208 }
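
/*
 * Illustrative arithmetic, not part of f2fs: capacity of a compacted summary
 * page, as a hypothetical helper. The first page also carries the two
 * journals, so it holds roughly (PAGE_SIZE - SUM_FOOTER_SIZE -
 * 2 * SUM_JOURNAL_SIZE) / SUMMARY_SIZE entries, while each following page
 * holds roughly (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE entries; the
 * loop above starts a new meta page whenever the next entry would reach
 * the footer area.
 */
#if 0
static int compacted_entries_example(int page_size, int footer_size,
				int journal_bytes, int entry_size)
{
	/* journal_bytes: 2 * SUM_JOURNAL_SIZE on the first page, else 0 */
	return (page_size - footer_size - journal_bytes) / entry_size;
}
#endif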
2209
2210 static void write_normal_summaries(struct f2fs_sb_info *sbi,
2211                                         block_t blkaddr, int type)
2212 {
2213         int i, end;
2214         if (IS_DATASEG(type))
2215                 end = type + NR_CURSEG_DATA_TYPE;
2216         else
2217                 end = type + NR_CURSEG_NODE_TYPE;
2218
2219         for (i = type; i < end; i++)
2220                 write_current_sum_page(sbi, i, blkaddr + (i - type));
2221 }
2222
2223 void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
2224 {
2225         if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
2226                 write_compacted_summaries(sbi, start_blk);
2227         else
2228                 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
2229 }
2230
2231 void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
2232 {
2233         write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
2234 }
2235
2236 int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
2237                                         unsigned int val, int alloc)
2238 {
2239         int i;
2240
2241         if (type == NAT_JOURNAL) {
2242                 for (i = 0; i < nats_in_cursum(journal); i++) {
2243                         if (le32_to_cpu(nid_in_journal(journal, i)) == val)
2244                                 return i;
2245                 }
2246                 if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
2247                         return update_nats_in_cursum(journal, 1);
2248         } else if (type == SIT_JOURNAL) {
2249                 for (i = 0; i < sits_in_cursum(journal); i++)
2250                         if (le32_to_cpu(segno_in_journal(journal, i)) == val)
2251                                 return i;
2252                 if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
2253                         return update_sits_in_cursum(journal, 1);
2254         }
2255         return -1;
2256 }
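
/*
 * Illustrative usage, not part of f2fs beyond what flush_sit_entries()
 * below already does: find or allocate a SIT journal slot for a segment.
 * A negative return means the journal is full and the caller must fall
 * back to writing the entry into the on-disk SIT page instead.
 */
#if 0
	offset = lookup_journal_in_cursum(journal, SIT_JOURNAL, segno, 1);
	if (offset >= 0) {
		segno_in_journal(journal, offset) = cpu_to_le32(segno);
		seg_info_to_raw_sit(se, &sit_in_journal(journal, offset));
	}
#endif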
2257
2258 static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
2259                                         unsigned int segno)
2260 {
2261         return get_meta_page(sbi, current_sit_addr(sbi, segno));
2262 }
2263
2264 static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
2265                                         unsigned int start)
2266 {
2267         struct sit_info *sit_i = SIT_I(sbi);
2268         struct page *src_page, *dst_page;
2269         pgoff_t src_off, dst_off;
2270         void *src_addr, *dst_addr;
2271
2272         src_off = current_sit_addr(sbi, start);
2273         dst_off = next_sit_addr(sbi, src_off);
2274
2275         /* get current sit block page without lock */
2276         src_page = get_meta_page(sbi, src_off);
2277         dst_page = grab_meta_page(sbi, dst_off);
2278         f2fs_bug_on(sbi, PageDirty(src_page));
2279
2280         src_addr = page_address(src_page);
2281         dst_addr = page_address(dst_page);
2282         memcpy(dst_addr, src_addr, PAGE_SIZE);
2283
2284         set_page_dirty(dst_page);
2285         f2fs_put_page(src_page, 1);
2286
2287         set_to_next_sit(sit_i, start);
2288
2289         return dst_page;
2290 }
2291
2292 static struct sit_entry_set *grab_sit_entry_set(void)
2293 {
2294         struct sit_entry_set *ses =
2295                         f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
2296
2297         ses->entry_cnt = 0;
2298         INIT_LIST_HEAD(&ses->set_list);
2299         return ses;
2300 }
2301
2302 static void release_sit_entry_set(struct sit_entry_set *ses)
2303 {
2304         list_del(&ses->set_list);
2305         kmem_cache_free(sit_entry_set_slab, ses);
2306 }
2307
2308 static void adjust_sit_entry_set(struct sit_entry_set *ses,
2309                                                 struct list_head *head)
2310 {
2311         struct sit_entry_set *next = ses;
2312
2313         if (list_is_last(&ses->set_list, head))
2314                 return;
2315
2316         list_for_each_entry_continue(next, head, set_list)
2317                 if (ses->entry_cnt <= next->entry_cnt)
2318                         break;
2319
2320         list_move_tail(&ses->set_list, &next->set_list);
2321 }
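
/*
 * Illustrative sketch, not part of f2fs: the set list is kept sorted by
 * entry_cnt in ascending order, presumably so that flush_sit_entries() can
 * journal as many small sets as possible before the journal fills up and it
 * falls back to SIT pages. Since a set's count only ever grows by one here,
 * restoring order is a single rightward shift, as in this hypothetical
 * array-based model.
 */
#if 0
static void adjust_after_increment_example(int *cnt, int n, int k)
{
	/* cnt[k] was just incremented; bubble it right until sorted again */
	while (k + 1 < n && cnt[k] > cnt[k + 1]) {
		int tmp = cnt[k];

		cnt[k] = cnt[k + 1];
		cnt[k + 1] = tmp;
		k++;
	}
}
#endif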
2322
2323 static void add_sit_entry(unsigned int segno, struct list_head *head)
2324 {
2325         struct sit_entry_set *ses;
2326         unsigned int start_segno = START_SEGNO(segno);
2327
2328         list_for_each_entry(ses, head, set_list) {
2329                 if (ses->start_segno == start_segno) {
2330                         ses->entry_cnt++;
2331                         adjust_sit_entry_set(ses, head);
2332                         return;
2333                 }
2334         }
2335
2336         ses = grab_sit_entry_set();
2337
2338         ses->start_segno = start_segno;
2339         ses->entry_cnt++;
2340         list_add(&ses->set_list, head);
2341 }
2342
2343 static void add_sits_in_set(struct f2fs_sb_info *sbi)
2344 {
2345         struct f2fs_sm_info *sm_info = SM_I(sbi);
2346         struct list_head *set_list = &sm_info->sit_entry_set;
2347         unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
2348         unsigned int segno;
2349
2350         for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
2351                 add_sit_entry(segno, set_list);
2352 }
2353
2354 static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
2355 {
2356         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2357         struct f2fs_journal *journal = curseg->journal;
2358         int i;
2359
2360         down_write(&curseg->journal_rwsem);
2361         for (i = 0; i < sits_in_cursum(journal); i++) {
2362                 unsigned int segno;
2363                 bool dirtied;
2364
2365                 segno = le32_to_cpu(segno_in_journal(journal, i));
2366                 dirtied = __mark_sit_entry_dirty(sbi, segno);
2367
2368                 if (!dirtied)
2369                         add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
2370         }
2371         update_sits_in_cursum(journal, -i);
2372         up_write(&curseg->journal_rwsem);
2373 }
2374
2375 /*
2376  * CP calls this function, which flushes SIT entries including sit_journal,
2377  * and moves prefree segs to free segs.
2378  */
2379 void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2380 {
2381         struct sit_info *sit_i = SIT_I(sbi);
2382         unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
2383         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2384         struct f2fs_journal *journal = curseg->journal;
2385         struct sit_entry_set *ses, *tmp;
2386         struct list_head *head = &SM_I(sbi)->sit_entry_set;
2387         bool to_journal = true;
2388         struct seg_entry *se;
2389
2390         mutex_lock(&sit_i->sentry_lock);
2391
2392         if (!sit_i->dirty_sentries)
2393                 goto out;
2394
2395         /*
2396          * temporarily add and account the sit entries marked in the dirty
2397          * bitmap in sit entry sets
2398          */
2399         add_sits_in_set(sbi);
2400
2401         /*
2402          * if there is not enough space in the journal to store dirty sit
2403          * entries, remove all entries from the journal and add and account
2404          * them in the sit entry set.
2405          */
2406         if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
2407                 remove_sits_in_journal(sbi);
2408
2409         /*
2410          * there are two steps to flush sit entries:
2411          * #1, flush sit entries to journal in current cold data summary block.
2412          * #2, flush sit entries to sit page.
2413          */
2414         list_for_each_entry_safe(ses, tmp, head, set_list) {
2415                 struct page *page = NULL;
2416                 struct f2fs_sit_block *raw_sit = NULL;
2417                 unsigned int start_segno = ses->start_segno;
2418                 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
2419                                                 (unsigned long)MAIN_SEGS(sbi));
2420                 unsigned int segno = start_segno;
2421
2422                 if (to_journal &&
2423                         !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
2424                         to_journal = false;
2425
2426                 if (to_journal) {
2427                         down_write(&curseg->journal_rwsem);
2428                 } else {
2429                         page = get_next_sit_page(sbi, start_segno);
2430                         raw_sit = page_address(page);
2431                 }
2432
2433                 /* flush dirty sit entries in region of current sit set */
2434                 for_each_set_bit_from(segno, bitmap, end) {
2435                         int offset, sit_offset;
2436
2437                         se = get_seg_entry(sbi, segno);
2438
2439                         /* add discard candidates */
2440                         if (cpc->reason != CP_DISCARD) {
2441                                 cpc->trim_start = segno;
2442                                 add_discard_addrs(sbi, cpc, false);
2443                         }
2444
2445                         if (to_journal) {
2446                                 offset = lookup_journal_in_cursum(journal,
2447                                                         SIT_JOURNAL, segno, 1);
2448                                 f2fs_bug_on(sbi, offset < 0);
2449                                 segno_in_journal(journal, offset) =
2450                                                         cpu_to_le32(segno);
2451                                 seg_info_to_raw_sit(se,
2452                                         &sit_in_journal(journal, offset));
2453                         } else {
2454                                 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
2455                                 seg_info_to_raw_sit(se,
2456                                                 &raw_sit->entries[sit_offset]);
2457                         }
2458
2459                         __clear_bit(segno, bitmap);
2460                         sit_i->dirty_sentries--;
2461                         ses->entry_cnt--;
2462                 }
2463
2464                 if (to_journal)
2465                         up_write(&curseg->journal_rwsem);
2466                 else
2467                         f2fs_put_page(page, 1);
2468
2469                 f2fs_bug_on(sbi, ses->entry_cnt);
2470                 release_sit_entry_set(ses);
2471         }
2472
2473         f2fs_bug_on(sbi, !list_empty(head));
2474         f2fs_bug_on(sbi, sit_i->dirty_sentries);
2475 out:
2476         if (cpc->reason == CP_DISCARD) {
2477                 __u64 trim_start = cpc->trim_start;
2478
2479                 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
2480                         add_discard_addrs(sbi, cpc, false);
2481
2482                 cpc->trim_start = trim_start;
2483         }
2484         mutex_unlock(&sit_i->sentry_lock);
2485
2486         set_prefree_as_free_segments(sbi);
2487 }
2488
2489 static int build_sit_info(struct f2fs_sb_info *sbi)
2490 {
2491         struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2492         struct sit_info *sit_i;
2493         unsigned int sit_segs, start;
2494         char *src_bitmap;
2495         unsigned int bitmap_size;
2496
2497         /* allocate memory for SIT information */
2498         sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
2499         if (!sit_i)
2500                 return -ENOMEM;
2501
2502         SM_I(sbi)->sit_info = sit_i;
2503
2504         sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) *
2505                                         sizeof(struct seg_entry), GFP_KERNEL);
2506         if (!sit_i->sentries)
2507                 return -ENOMEM;
2508
2509         bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
2510         sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
2511         if (!sit_i->dirty_sentries_bitmap)
2512                 return -ENOMEM;
2513
2514         for (start = 0; start < MAIN_SEGS(sbi); start++) {
2515                 sit_i->sentries[start].cur_valid_map
2516                         = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2517                 sit_i->sentries[start].ckpt_valid_map
2518                         = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2519                 if (!sit_i->sentries[start].cur_valid_map ||
2520                                 !sit_i->sentries[start].ckpt_valid_map)
2521                         return -ENOMEM;
2522
2523 #ifdef CONFIG_F2FS_CHECK_FS
2524                 sit_i->sentries[start].cur_valid_map_mir
2525                         = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2526                 if (!sit_i->sentries[start].cur_valid_map_mir)
2527                         return -ENOMEM;
2528 #endif
2529
2530                 if (f2fs_discard_en(sbi)) {
2531                         sit_i->sentries[start].discard_map
2532                                 = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2533                         if (!sit_i->sentries[start].discard_map)
2534                                 return -ENOMEM;
2535                 }
2536         }
2537
2538         sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
2539         if (!sit_i->tmp_map)
2540                 return -ENOMEM;
2541
2542         if (sbi->segs_per_sec > 1) {
2543                 sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) *
2544                                         sizeof(struct sec_entry), GFP_KERNEL);
2545                 if (!sit_i->sec_entries)
2546                         return -ENOMEM;
2547         }
2548
2549         /* get information related to SIT */
2550         sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
2551
2552         /* setup SIT bitmap from checkpoint pack */
2553         bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
2554         src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
2555
2556         sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
2557         if (!sit_i->sit_bitmap)
2558                 return -ENOMEM;
2559
2560 #ifdef CONFIG_F2FS_CHECK_FS
2561         sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
2562         if (!sit_i->sit_bitmap_mir)
2563                 return -ENOMEM;
2564 #endif
2565
2566         /* init SIT information */
2567         sit_i->s_ops = &default_salloc_ops;
2568
2569         sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
2570         sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
2571         sit_i->written_valid_blocks = 0;
2572         sit_i->bitmap_size = bitmap_size;
2573         sit_i->dirty_sentries = 0;
2574         sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
2575         sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
2576         sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
2577         mutex_init(&sit_i->sentry_lock);
2578         return 0;
2579 }
2580
2581 static int build_free_segmap(struct f2fs_sb_info *sbi)
2582 {
2583         struct free_segmap_info *free_i;
2584         unsigned int bitmap_size, sec_bitmap_size;
2585
2586         /* allocate memory for free segmap information */
2587         free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
2588         if (!free_i)
2589                 return -ENOMEM;
2590
2591         SM_I(sbi)->free_info = free_i;
2592
2593         bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
2594         free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL);
2595         if (!free_i->free_segmap)
2596                 return -ENOMEM;
2597
2598         sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
2599         free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL);
2600         if (!free_i->free_secmap)
2601                 return -ENOMEM;
2602
2603         /* set all segments as dirty temporarily */
2604         memset(free_i->free_segmap, 0xff, bitmap_size);
2605         memset(free_i->free_secmap, 0xff, sec_bitmap_size);
2606
2607         /* init free segmap information */
2608         free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
2609         free_i->free_segments = 0;
2610         free_i->free_sections = 0;
2611         spin_lock_init(&free_i->segmap_lock);
2612         return 0;
2613 }
2614
2615 static int build_curseg(struct f2fs_sb_info *sbi)
2616 {
2617         struct curseg_info *array;
2618         int i;
2619
2620         array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
2621         if (!array)
2622                 return -ENOMEM;
2623
2624         SM_I(sbi)->curseg_array = array;
2625
2626         for (i = 0; i < NR_CURSEG_TYPE; i++) {
2627                 mutex_init(&array[i].curseg_mutex);
2628                 array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
2629                 if (!array[i].sum_blk)
2630                         return -ENOMEM;
2631                 init_rwsem(&array[i].journal_rwsem);
2632                 array[i].journal = kzalloc(sizeof(struct f2fs_journal),
2633                                                         GFP_KERNEL);
2634                 if (!array[i].journal)
2635                         return -ENOMEM;
2636                 array[i].segno = NULL_SEGNO;
2637                 array[i].next_blkoff = 0;
2638         }
2639         return restore_curseg_summaries(sbi);
2640 }
2641
2642 static void build_sit_entries(struct f2fs_sb_info *sbi)
2643 {
2644         struct sit_info *sit_i = SIT_I(sbi);
2645         struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2646         struct f2fs_journal *journal = curseg->journal;
2647         struct seg_entry *se;
2648         struct f2fs_sit_entry sit;
2649         int sit_blk_cnt = SIT_BLK_CNT(sbi);
2650         unsigned int i, start, end;
2651         unsigned int readed, start_blk = 0;
2652
2653         do {
2654                 readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
2655                                                         META_SIT, true);
2656
2657                 start = start_blk * sit_i->sents_per_block;
2658                 end = (start_blk + readed) * sit_i->sents_per_block;
2659
2660                 for (; start < end && start < MAIN_SEGS(sbi); start++) {
2661                         struct f2fs_sit_block *sit_blk;
2662                         struct page *page;
2663
2664                         se = &sit_i->sentries[start];
2665                         page = get_current_sit_page(sbi, start);
2666                         sit_blk = (struct f2fs_sit_block *)page_address(page);
2667                         sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
2668                         f2fs_put_page(page, 1);
2669
2670                         check_block_count(sbi, start, &sit);
2671                         seg_info_from_raw_sit(se, &sit);
2672
2673                         /* build discard map only one time */
2674                         if (f2fs_discard_en(sbi)) {
2675                                 memcpy(se->discard_map, se->cur_valid_map,
2676                                                         SIT_VBLOCK_MAP_SIZE);
2677                                 sbi->discard_blks += sbi->blocks_per_seg -
2678                                                         se->valid_blocks;
2679                         }
2680
2681                         if (sbi->segs_per_sec > 1)
2682                                 get_sec_entry(sbi, start)->valid_blocks +=
2683                                                         se->valid_blocks;
2684                 }
2685                 start_blk += readed;
2686         } while (start_blk < sit_blk_cnt);
2687
2688         down_read(&curseg->journal_rwsem);
2689         for (i = 0; i < sits_in_cursum(journal); i++) {
2690                 unsigned int old_valid_blocks;
2691
2692                 start = le32_to_cpu(segno_in_journal(journal, i));
2693                 se = &sit_i->sentries[start];
2694                 sit = sit_in_journal(journal, i);
2695
2696                 old_valid_blocks = se->valid_blocks;
2697
2698                 check_block_count(sbi, start, &sit);
2699                 seg_info_from_raw_sit(se, &sit);
2700
2701                 if (f2fs_discard_en(sbi)) {
2702                         memcpy(se->discard_map, se->cur_valid_map,
2703                                                 SIT_VBLOCK_MAP_SIZE);
2704                         sbi->discard_blks += old_valid_blocks -
2705                                                 se->valid_blocks;
2706                 }
2707
2708                 if (sbi->segs_per_sec > 1)
2709                         get_sec_entry(sbi, start)->valid_blocks +=
2710                                 se->valid_blocks - old_valid_blocks;
2711         }
2712         up_read(&curseg->journal_rwsem);
2713 }
2714
2715 static void init_free_segmap(struct f2fs_sb_info *sbi)
2716 {
2717         unsigned int start;
2718         int type;
2719
2720         for (start = 0; start < MAIN_SEGS(sbi); start++) {
2721                 struct seg_entry *sentry = get_seg_entry(sbi, start);
2722                 if (!sentry->valid_blocks)
2723                         __set_free(sbi, start);
2724                 else
2725                         SIT_I(sbi)->written_valid_blocks +=
2726                                                 sentry->valid_blocks;
2727         }
2728
2729         /* set the current segments as in-use */
2730         for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
2731                 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
2732                 __set_test_and_inuse(sbi, curseg_t->segno);
2733         }
2734 }
2735
2736 static void init_dirty_segmap(struct f2fs_sb_info *sbi)
2737 {
2738         struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2739         struct free_segmap_info *free_i = FREE_I(sbi);
2740         unsigned int segno = 0, offset = 0;
2741         unsigned short valid_blocks;
2742
2743         while (1) {
2744                 /* find dirty segment based on free segmap */
2745                 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
2746                 if (segno >= MAIN_SEGS(sbi))
2747                         break;
2748                 offset = segno + 1;
2749                 valid_blocks = get_valid_blocks(sbi, segno, 0);
2750                 if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
2751                         continue;
2752                 if (valid_blocks > sbi->blocks_per_seg) {
2753                         f2fs_bug_on(sbi, 1);
2754                         continue;
2755                 }
2756                 mutex_lock(&dirty_i->seglist_lock);
2757                 __locate_dirty_segment(sbi, segno, DIRTY);
2758                 mutex_unlock(&dirty_i->seglist_lock);
2759         }
2760 }
2761
2762 static int init_victim_secmap(struct f2fs_sb_info *sbi)
2763 {
2764         struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2765         unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
2766
2767         dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
2768         if (!dirty_i->victim_secmap)
2769                 return -ENOMEM;
2770         return 0;
2771 }
2772
2773 static int build_dirty_segmap(struct f2fs_sb_info *sbi)
2774 {
2775         struct dirty_seglist_info *dirty_i;
2776         unsigned int bitmap_size, i;
2777
2778         /* allocate memory for dirty segments list information */
2779         dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
2780         if (!dirty_i)
2781                 return -ENOMEM;
2782
2783         SM_I(sbi)->dirty_info = dirty_i;
2784         mutex_init(&dirty_i->seglist_lock);
2785
2786         bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
2787
2788         for (i = 0; i < NR_DIRTY_TYPE; i++) {
2789                 dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
2790                 if (!dirty_i->dirty_segmap[i])
2791                         return -ENOMEM;
2792         }
2793
2794         init_dirty_segmap(sbi);
2795         return init_victim_secmap(sbi);
2796 }
2797
2798 /*
2799  * Update min, max modified time for cost-benefit GC algorithm
2800  */
2801 static void init_min_max_mtime(struct f2fs_sb_info *sbi)
2802 {
2803         struct sit_info *sit_i = SIT_I(sbi);
2804         unsigned int segno;
2805
2806         mutex_lock(&sit_i->sentry_lock);
2807
2808         sit_i->min_mtime = LLONG_MAX;
2809
2810         for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
2811                 unsigned int i;
2812                 unsigned long long mtime = 0;
2813
2814                 for (i = 0; i < sbi->segs_per_sec; i++)
2815                         mtime += get_seg_entry(sbi, segno + i)->mtime;
2816
2817                 mtime = div_u64(mtime, sbi->segs_per_sec);
2818
2819                 if (sit_i->min_mtime > mtime)
2820                         sit_i->min_mtime = mtime;
2821         }
2822         sit_i->max_mtime = get_mtime(sbi);
2823         mutex_unlock(&sit_i->sentry_lock);
2824 }
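
/*
 * Illustrative arithmetic, not part of f2fs: min_mtime is the smallest
 * per-section average modification time. E.g. with segs_per_sec = 2 and
 * segment mtimes {100, 300}, the section contributes (100 + 300) / 2 = 200
 * to the minimum, as this hypothetical helper shows.
 */
#if 0
static unsigned long long section_mtime_example(const unsigned long long *mt,
					unsigned int segs_per_sec)
{
	unsigned long long sum = 0;
	unsigned int i;

	for (i = 0; i < segs_per_sec; i++)
		sum += mt[i];
	return sum / segs_per_sec;	/* kernel code uses div_u64() */
}
#endif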
2825
2826 int build_segment_manager(struct f2fs_sb_info *sbi)
2827 {
2828         struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
2829         struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2830         struct f2fs_sm_info *sm_info;
2831         int err;
2832
2833         sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
2834         if (!sm_info)
2835                 return -ENOMEM;
2836
2837         /* init sm info */
2838         sbi->sm_info = sm_info;
2839         sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
2840         sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
2841         sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
2842         sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
2843         sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
2844         sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
2845         sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
2846         sm_info->rec_prefree_segments = sm_info->main_segments *
2847                                         DEF_RECLAIM_PREFREE_SEGMENTS / 100;
2848         if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
2849                 sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
2850
2851         if (!test_opt(sbi, LFS))
2852                 sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
2853         sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
2854         sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
2855
2856         sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;
2857
2858         INIT_LIST_HEAD(&sm_info->sit_entry_set);
2859
2860         if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
2861                 err = create_flush_cmd_control(sbi);
2862                 if (err)
2863                         return err;
2864         }
2865
2866         err = create_discard_cmd_control(sbi);
2867         if (err)
2868                 return err;
2869
2870         err = build_sit_info(sbi);
2871         if (err)
2872                 return err;
2873         err = build_free_segmap(sbi);
2874         if (err)
2875                 return err;
2876         err = build_curseg(sbi);
2877         if (err)
2878                 return err;
2879
2880         /* reinit free segmap based on SIT */
2881         build_sit_entries(sbi);
2882
2883         init_free_segmap(sbi);
2884         err = build_dirty_segmap(sbi);
2885         if (err)
2886                 return err;
2887
2888         init_min_max_mtime(sbi);
2889         return 0;
2890 }
2891
static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
                enum dirty_type dirty_type)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        mutex_lock(&dirty_i->seglist_lock);
        kvfree(dirty_i->dirty_segmap[dirty_type]);
        dirty_i->nr_dirty[dirty_type] = 0;
        mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        kvfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        int i;

        if (!dirty_i)
                return;

        /* discard pre-free/dirty segments list */
        for (i = 0; i < NR_DIRTY_TYPE; i++)
                discard_dirty_segmap(sbi, i);

        destroy_victim_secmap(sbi);
        SM_I(sbi)->dirty_info = NULL;
        kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
        struct curseg_info *array = SM_I(sbi)->curseg_array;
        int i;

        if (!array)
                return;
        SM_I(sbi)->curseg_array = NULL;
        for (i = 0; i < NR_CURSEG_TYPE; i++) {
                kfree(array[i].sum_blk);
                kfree(array[i].journal);
        }
        kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
        struct free_segmap_info *free_i = SM_I(sbi)->free_info;
        if (!free_i)
                return;
        SM_I(sbi)->free_info = NULL;
        kvfree(free_i->free_segmap);
        kvfree(free_i->free_secmap);
        kfree(free_i);
}

static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int start;

        if (!sit_i)
                return;

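        /*
         * Free each segment's per-segment bitmaps (current and
         * checkpointed validity maps plus the discard map) before
         * freeing the sentry array that holds the pointers.
         */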
        if (sit_i->sentries) {
                for (start = 0; start < MAIN_SEGS(sbi); start++) {
                        kfree(sit_i->sentries[start].cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
                        kfree(sit_i->sentries[start].cur_valid_map_mir);
#endif
                        kfree(sit_i->sentries[start].ckpt_valid_map);
                        kfree(sit_i->sentries[start].discard_map);
                }
        }
        kfree(sit_i->tmp_map);

        kvfree(sit_i->sentries);
        kvfree(sit_i->sec_entries);
        kvfree(sit_i->dirty_sentries_bitmap);

        SM_I(sbi)->sit_info = NULL;
        kfree(sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
        kfree(sit_i->sit_bitmap_mir);
#endif
        kfree(sit_i);
}

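/*
 * Tear down in roughly the reverse order of build_segment_manager(),
 * stopping the flush and discard issuing threads first so nothing is
 * still touching the segment structures while they are freed.
 */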
void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
        struct f2fs_sm_info *sm_info = SM_I(sbi);

        if (!sm_info)
                return;
        destroy_flush_cmd_control(sbi, true);
        destroy_discard_cmd_control(sbi, true);
        destroy_dirty_segmap(sbi);
        destroy_curseg(sbi);
        destroy_free_segmap(sbi);
        destroy_sit_info(sbi);
        sbi->sm_info = NULL;
        kfree(sm_info);
}

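/*
 * Slab caches for the small, frequently allocated objects used above:
 * discard entries and commands, SIT entry sets, and in-memory page
 * entries.  On failure the goto ladder unwinds in reverse creation
 * order.  These are created once at module init and destroyed at
 * module exit.
 */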
int __init create_segment_manager_caches(void)
{
        discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
                        sizeof(struct discard_entry));
        if (!discard_entry_slab)
                goto fail;

        discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
                        sizeof(struct discard_cmd));
        if (!discard_cmd_slab)
                goto destroy_discard_entry;

        sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
                        sizeof(struct sit_entry_set));
        if (!sit_entry_set_slab)
                goto destroy_discard_cmd;

        inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
                        sizeof(struct inmem_pages));
        if (!inmem_entry_slab)
                goto destroy_sit_entry_set;
        return 0;

destroy_sit_entry_set:
        kmem_cache_destroy(sit_entry_set_slab);
destroy_discard_cmd:
        kmem_cache_destroy(discard_cmd_slab);
destroy_discard_entry:
        kmem_cache_destroy(discard_entry_slab);
fail:
        return -ENOMEM;
}

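/*
 * Allocation sketch (hedged: f2fs_kmem_cache_alloc() is the f2fs.h
 * wrapper that retries a failed slab allocation; the locking and list
 * handling at real call sites is omitted here):
 *
 *        struct discard_entry *de;
 *
 *        de = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
 *        ... fill in and queue the entry ...
 *        kmem_cache_free(discard_entry_slab, de);
 */

/*
 * Counterpart of create_segment_manager_caches().  All objects must
 * have been freed back to their caches by now; kmem_cache_destroy()
 * will warn if any are still live.
 */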
void destroy_segment_manager_caches(void)
{
        kmem_cache_destroy(sit_entry_set_slab);
        kmem_cache_destroy(discard_cmd_slab);
        kmem_cache_destroy(discard_entry_slab);
        kmem_cache_destroy(inmem_entry_slab);
}