/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
        struct f2fs_sb_info *sbi = data;
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
        long wait_ms;

        wait_ms = gc_th->min_sleep_time;

        do {
                if (try_to_freeze())
                        continue;
                else
                        wait_event_interruptible_timeout(*wq,
                                                kthread_should_stop(),
                                                msecs_to_jiffies(wait_ms));
                if (kthread_should_stop())
                        break;

                if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
                        increase_sleep_time(gc_th, &wait_ms);
                        continue;
                }

                /*
                 * [GC triggering condition]
                 * 0. GC is not conducted currently.
                 * 1. There are enough dirty segments.
                 * 2. IO subsystem is idle by checking the # of writeback pages.
                 * 3. IO subsystem is idle by checking the # of requests in
                 *    bdev's request list.
                 *
                 * Note) We have to avoid triggering GCs too frequently,
                 * because some segments can be invalidated soon afterwards
                 * by a user update or deletion, so it is better to wait a
                 * while and collect more dirty segments before running GC.
                 */
                if (!mutex_trylock(&sbi->gc_mutex))
                        continue;

                if (!is_idle(sbi)) {
                        increase_sleep_time(gc_th, &wait_ms);
                        mutex_unlock(&sbi->gc_mutex);
                        continue;
                }

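                /*
                 * Adjust the polling interval: shorten it when there are
                 * many invalid blocks waiting to be reclaimed, lengthen it
                 * otherwise so background GC stays unobtrusive.
                 */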
                if (has_enough_invalid_blocks(sbi))
                        decrease_sleep_time(gc_th, &wait_ms);
                else
                        increase_sleep_time(gc_th, &wait_ms);

                stat_inc_bggc_count(sbi);

                /* if return value is not zero, no victim was selected */
                if (f2fs_gc(sbi))
                        wait_ms = gc_th->no_gc_sleep_time;

                /* balancing f2fs's metadata periodically */
                f2fs_balance_fs_bg(sbi);

        } while (!kthread_should_stop());
        return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th;
        dev_t dev = sbi->sb->s_bdev->bd_dev;
        int err = 0;

        gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
        if (!gc_th) {
                err = -ENOMEM;
                goto out;
        }

        gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
        gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
        gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

        gc_th->gc_idle = 0;

        sbi->gc_thread = gc_th;
        init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
        sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
                        "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(gc_th->f2fs_gc_task)) {
                err = PTR_ERR(gc_th->f2fs_gc_task);
                kfree(gc_th);
                sbi->gc_thread = NULL;
        }
out:
        return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        if (!gc_th)
                return;
        kthread_stop(gc_th->f2fs_gc_task);
        kfree(gc_th);
        sbi->gc_thread = NULL;
}

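/*
 * Pick the victim selection policy for this GC pass: when gc_idle is set
 * (1 selects cost-benefit, 2 selects greedy), it overrides the default of
 * cost-benefit for background GC and greedy for foreground GC.
 */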
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
        int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

        if (gc_th && gc_th->gc_idle) {
                if (gc_th->gc_idle == 1)
                        gc_mode = GC_CB;
                else if (gc_th->gc_idle == 2)
                        gc_mode = GC_GREEDY;
        }
        return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
                        int type, struct victim_sel_policy *p)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        if (p->alloc_mode == SSR) {
                p->gc_mode = GC_GREEDY;
                p->dirty_segmap = dirty_i->dirty_segmap[type];
                p->max_search = dirty_i->nr_dirty[type];
                p->ofs_unit = 1;
        } else {
                p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
                p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
                p->max_search = dirty_i->nr_dirty[DIRTY];
                p->ofs_unit = sbi->segs_per_sec;
        }

        if (p->max_search > sbi->max_victim_search)
                p->max_search = sbi->max_victim_search;

        p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
                                struct victim_sel_policy *p)
{
        /* SSR allocates in a segment unit */
        if (p->alloc_mode == SSR)
                return 1 << sbi->log_blocks_per_seg;
        if (p->gc_mode == GC_GREEDY)
                return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
        else if (p->gc_mode == GC_CB)
                return UINT_MAX;
        else /* No other gc_mode */
                return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned int secno;

        /*
         * If the gc_type is FG_GC, we can select victim segments that were
         * already selected by background GC before. Those segments are
         * guaranteed to have only a small number of valid blocks.
         */
        for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
                if (sec_usage_check(sbi, secno))
                        continue;
                clear_bit(secno, dirty_i->victim_secmap);
                return secno * sbi->segs_per_sec;
        }
        return NULL_SEGNO;
}

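/*
 * Cost-benefit cost of a candidate section: average the mtime and valid
 * block count over the segments in the section, derive the utilization u
 * (valid blocks as a percentage of a segment) and a normalized age from
 * the min/max mtime, and return UINT_MAX - (100 * (100 - u) * age) / (100 + u).
 * Older sections with fewer valid blocks thus get a smaller cost and are
 * preferred as victims.
 */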
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int secno = GET_SECNO(sbi, segno);
        unsigned int start = secno * sbi->segs_per_sec;
        unsigned long long mtime = 0;
        unsigned int vblocks;
        unsigned char age = 0;
        unsigned char u;
        unsigned int i;

        for (i = 0; i < sbi->segs_per_sec; i++)
                mtime += get_seg_entry(sbi, start + i)->mtime;
        vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

        mtime = div_u64(mtime, sbi->segs_per_sec);
        vblocks = div_u64(vblocks, sbi->segs_per_sec);

        u = (vblocks * 100) >> sbi->log_blocks_per_seg;

        /* Handle the case where the system time was changed by the user */
        if (mtime < sit_i->min_mtime)
                sit_i->min_mtime = mtime;
        if (mtime > sit_i->max_mtime)
                sit_i->max_mtime = mtime;
        if (sit_i->max_mtime != sit_i->min_mtime)
                age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
                                sit_i->max_mtime - sit_i->min_mtime);

        return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

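/*
 * Cost of a candidate for victim selection: SSR picks the segment with the
 * fewest valid blocks at the last checkpoint; LFS uses the raw valid block
 * count in greedy mode and the cost-benefit value otherwise.
 */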
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
                        unsigned int segno, struct victim_sel_policy *p)
{
        if (p->alloc_mode == SSR)
                return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

        /* alloc_mode == LFS */
        if (p->gc_mode == GC_GREEDY)
                return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
        else
                return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
                unsigned int *result, int gc_type, int type, char alloc_mode)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct victim_sel_policy p;
        unsigned int secno, max_cost;
        int nsearched = 0;

        mutex_lock(&dirty_i->seglist_lock);

        p.alloc_mode = alloc_mode;
        select_policy(sbi, gc_type, type, &p);

        p.min_segno = NULL_SEGNO;
        p.min_cost = max_cost = get_max_cost(sbi, &p);

        if (p.alloc_mode == LFS && gc_type == FG_GC) {
                p.min_segno = check_bg_victims(sbi);
                if (p.min_segno != NULL_SEGNO)
                        goto got_it;
        }

        while (1) {
                unsigned long cost;
                unsigned int segno;

                segno = find_next_bit(p.dirty_segmap, MAIN_SEGS(sbi), p.offset);
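                /*
                 * If the scan ran past the end of the dirty segmap, restart
                 * once from the beginning when a last_victim cursor was in
                 * use; otherwise stop searching.
                 */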
                if (segno >= MAIN_SEGS(sbi)) {
                        if (sbi->last_victim[p.gc_mode]) {
                                sbi->last_victim[p.gc_mode] = 0;
                                p.offset = 0;
                                continue;
                        }
                        break;
                }

                p.offset = segno + p.ofs_unit;
                if (p.ofs_unit > 1)
                        p.offset -= segno % p.ofs_unit;

                secno = GET_SECNO(sbi, segno);

                if (sec_usage_check(sbi, secno))
                        continue;
                if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
                        continue;

                cost = get_gc_cost(sbi, segno, &p);

                if (p.min_cost > cost) {
                        p.min_segno = segno;
                        p.min_cost = cost;
                } else if (unlikely(cost == max_cost)) {
                        continue;
                }

                if (nsearched++ >= p.max_search) {
                        sbi->last_victim[p.gc_mode] = segno;
                        break;
                }
        }
        if (p.min_segno != NULL_SEGNO) {
got_it:
                if (p.alloc_mode == LFS) {
                        secno = GET_SECNO(sbi, p.min_segno);
                        if (gc_type == FG_GC)
                                sbi->cur_victim_sec = secno;
                        else
                                set_bit(secno, dirty_i->victim_secmap);
                }
                *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

                trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
                                sbi->cur_victim_sec,
                                prefree_segments(sbi), free_segments(sbi));
        }
        mutex_unlock(&dirty_i->seglist_lock);

        return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
        .get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
        struct inode_entry *ie;

        ie = radix_tree_lookup(&gc_list->iroot, ino);
        if (ie)
                return ie->inode;
        return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
        struct inode_entry *new_ie;

        if (inode == find_gc_inode(gc_list, inode->i_ino)) {
                iput(inode);
                return;
        }
        new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        new_ie->inode = inode;

        f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
        list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
        struct inode_entry *ie, *next_ie;
        list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
                radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
                iput(ie->inode);
                list_del(&ie->list);
                kmem_cache_free(inode_entry_slab, ie);
        }
}

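/*
 * Check, under the SIT sentry lock, whether the block at @offset within
 * @segno is still marked valid in the current valid bitmap.
 */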
static int check_valid_map(struct f2fs_sb_info *sbi,
                                unsigned int segno, int offset)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct seg_entry *sentry;
        int ret;

        mutex_lock(&sit_i->sentry_lock);
        sentry = get_seg_entry(sbi, segno);
        ret = f2fs_test_bit(offset, sentry->cur_valid_map);
        mutex_unlock(&sit_i->sentry_lock);
        return ret;
}

/*
 * This function compares the node address recorded in the summary with the
 * one in the NAT. If it is valid, the node is copied with cold status;
 * otherwise (an invalid node) it is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
                struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
        bool initial = true;
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;

        start_addr = START_BLOCK(sbi, segno);

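        /*
         * Two passes over the summary entries: the first (initial) pass
         * only issues readahead for the node pages, the second pass
         * re-checks validity and dirties the live node pages so they get
         * written out to a new segment.
         */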
next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                nid_t nid = le32_to_cpu(entry->nid);
                struct page *node_page;
                struct node_info ni;

                /* stop BG_GC if there are not enough free sections. */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
                        return 0;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (initial) {
                        ra_node_page(sbi, nid);
                        continue;
                }
                node_page = get_node_page(sbi, nid);
                if (IS_ERR(node_page))
                        continue;

                /* block may become invalid during get_node_page */
                if (check_valid_map(sbi, segno, off) == 0) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                get_node_info(sbi, nid, &ni);
                if (ni.blk_addr != start_addr + off) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                /* set page dirty and write it */
                if (gc_type == FG_GC) {
                        f2fs_wait_on_page_writeback(node_page, NODE);
                        set_page_dirty(node_page);
                } else {
                        if (!PageWriteback(node_page))
                                set_page_dirty(node_page);
                }
                f2fs_put_page(node_page, 1);
                stat_inc_node_blk_count(sbi, 1, gc_type);
        }

        if (initial) {
                initial = false;
                goto next_step;
        }

        if (gc_type == FG_GC) {
                struct writeback_control wbc = {
                        .sync_mode = WB_SYNC_ALL,
                        .nr_to_write = LONG_MAX,
                        .for_reclaim = 0,
                };
                sync_node_pages(sbi, 0, &wbc);

                /* return 1 only if FG_GC successfully reclaimed one */
                if (get_valid_blocks(sbi, segno, 1) == 0)
                        return 1;
        }
        return 0;
}

/*
 * Calculate the start block index for the given node offset.
 * Be careful: the caller must pass a node offset that indicates a direct
 * node block only. If a node offset pointing to another type of node block,
 * such as an indirect or double indirect node block, is given, it is a
 * caller's bug.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
        unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
        unsigned int bidx;

        if (node_ofs == 0)
                return 0;

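        /*
         * Node offset layout: offset 0 is the inode itself, offsets 1 and 2
         * are the two direct node blocks, and indirect/double indirect node
         * blocks are interleaved after that. The subtractions below skip
         * the non-direct node offsets so that bidx counts direct node
         * blocks only.
         */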
        if (node_ofs <= 2) {
                bidx = node_ofs - 1;
        } else if (node_ofs <= indirect_blks) {
                int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 2 - dec;
        } else {
                int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 5 - dec;
        }
        return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
        struct page *node_page;
        nid_t nid;
        unsigned int ofs_in_node;
        block_t source_blkaddr;

        nid = le32_to_cpu(sum->nid);
        ofs_in_node = le16_to_cpu(sum->ofs_in_node);

        node_page = get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return false;

        get_node_info(sbi, nid, dni);

        if (sum->version != dni->version) {
                f2fs_put_page(node_page, 1);
                return false;
        }

        *nofs = ofs_of_node(node_page);
        source_blkaddr = datablock_addr(node_page, ofs_in_node);
        f2fs_put_page(node_page, 1);

        if (source_blkaddr != blkaddr)
                return false;
        return true;
}

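/*
 * Move one block of an encrypted inode without decrypting it: the
 * ciphertext is read into a page of the meta mapping, a new block address
 * is allocated in the cold data log, the page is written back as-is, and
 * finally the dnode is updated to point at the new address.
 */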
static void move_encrypted_block(struct inode *inode, block_t bidx)
{
        struct f2fs_io_info fio = {
                .sbi = F2FS_I_SB(inode),
                .type = DATA,
                .rw = READ_SYNC,
                .encrypted_page = NULL,
        };
        struct dnode_of_data dn;
        struct f2fs_summary sum;
        struct node_info ni;
        struct page *page;
        int err;

        /* do not read out */
        page = grab_cache_page(inode->i_mapping, bidx);
        if (!page)
                return;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
        if (err)
                goto out;

        if (unlikely(dn.data_blkaddr == NULL_ADDR))
                goto put_out;

        get_node_info(fio.sbi, dn.nid, &ni);
        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

        /* read page */
        fio.page = page;
        fio.blk_addr = dn.data_blkaddr;

        fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi),
                                        fio.blk_addr,
                                        FGP_LOCK|FGP_CREAT,
                                        GFP_NOFS);
        if (!fio.encrypted_page)
                goto put_out;

        err = f2fs_submit_page_bio(&fio);
        if (err)
                goto put_page_out;

        /* write page */
        lock_page(fio.encrypted_page);

        if (unlikely(!PageUptodate(fio.encrypted_page)))
                goto put_page_out;
        if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
                goto put_page_out;

        set_page_dirty(fio.encrypted_page);
        f2fs_wait_on_page_writeback(fio.encrypted_page, META);
        if (clear_page_dirty_for_io(fio.encrypted_page))
                dec_page_count(fio.sbi, F2FS_DIRTY_META);

        set_page_writeback(fio.encrypted_page);

        /* allocate block address */
        f2fs_wait_on_page_writeback(dn.node_page, NODE);
        allocate_data_block(fio.sbi, NULL, fio.blk_addr,
                                        &fio.blk_addr, &sum, CURSEG_COLD_DATA);
        fio.rw = WRITE_SYNC;
        f2fs_submit_page_mbio(&fio);

        dn.data_blkaddr = fio.blk_addr;
        set_data_blkaddr(&dn);
        f2fs_update_extent_cache(&dn);
        set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
        if (page->index == 0)
                set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
put_page_out:
        f2fs_put_page(fio.encrypted_page, 1);
put_out:
        f2fs_put_dnode(&dn);
out:
        f2fs_put_page(page, 1);
}

static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
{
        struct page *page;

        page = get_lock_data_page(inode, bidx);
        if (IS_ERR(page))
                return;

        if (gc_type == BG_GC) {
                if (PageWriteback(page))
                        goto out;
                set_page_dirty(page);
                set_cold_data(page);
        } else {
                struct f2fs_io_info fio = {
                        .sbi = F2FS_I_SB(inode),
                        .type = DATA,
                        .rw = WRITE_SYNC,
                        .page = page,
                        .encrypted_page = NULL,
                };
                set_page_dirty(page);
                f2fs_wait_on_page_writeback(page, DATA);
                if (clear_page_dirty_for_io(page))
                        inode_dec_dirty_pages(inode);
                set_cold_data(page);
                do_write_data_page(&fio);
                clear_cold_data(page);
        }
out:
        f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs, the
 * victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
        struct super_block *sb = sbi->sb;
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;
        int phase = 0;

        start_addr = START_BLOCK(sbi, segno);

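        /*
         * Four passes over the summary entries:
         *   phase 0: readahead the node pages referenced by the summaries
         *   phase 1: readahead the inode pages of blocks that are still alive
         *   phase 2: iget the inodes and readahead their victim data pages
         *   phase 3: actually move the valid data blocks
         */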
next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                struct page *data_page;
                struct inode *inode;
                struct node_info dni; /* dnode info for the data */
                unsigned int ofs_in_node, nofs;
                block_t start_bidx;

                /* stop BG_GC if there are not enough free sections. */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
                        return 0;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (phase == 0) {
                        ra_node_page(sbi, le32_to_cpu(entry->nid));
                        continue;
                }

                /* Get an inode by ino, checking the block's validity */
                if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
                        continue;

                if (phase == 1) {
                        ra_node_page(sbi, dni.ino);
                        continue;
                }

                ofs_in_node = le16_to_cpu(entry->ofs_in_node);

                if (phase == 2) {
                        inode = f2fs_iget(sb, dni.ino);
                        if (IS_ERR(inode) || is_bad_inode(inode))
                                continue;

                        /* if it is an encrypted inode, defer the move to phase 3 */
                        if (f2fs_encrypted_inode(inode) &&
                                                S_ISREG(inode->i_mode)) {
                                add_gc_inode(gc_list, inode);
                                continue;
                        }

                        start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
                        data_page = get_read_data_page(inode,
                                        start_bidx + ofs_in_node, READA);
                        if (IS_ERR(data_page)) {
                                iput(inode);
                                continue;
                        }

                        f2fs_put_page(data_page, 0);
                        add_gc_inode(gc_list, inode);
                        continue;
                }

                /* phase 3 */
                inode = find_gc_inode(gc_list, dni.ino);
                if (inode) {
                        start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
                                                                + ofs_in_node;
                        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                                move_encrypted_block(inode, start_bidx);
                        else
                                move_data_page(inode, start_bidx, gc_type);
                        stat_inc_data_blk_count(sbi, 1, gc_type);
                }
        }

        if (++phase < 4)
                goto next_step;

        if (gc_type == FG_GC) {
                f2fs_submit_merged_bio(sbi, DATA, WRITE);

                /* return 1 only if FG_GC successfully reclaimed one */
                if (get_valid_blocks(sbi, segno, 1) == 0)
                        return 1;
        }
        return 0;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
                        int gc_type)
{
        struct sit_info *sit_i = SIT_I(sbi);
        int ret;

        mutex_lock(&sit_i->sentry_lock);
        ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
                                              NO_CHECK_TYPE, LFS);
        mutex_unlock(&sit_i->sentry_lock);
        return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
                                struct gc_inode_list *gc_list, int gc_type)
{
        struct page *sum_page;
        struct f2fs_summary_block *sum;
        struct blk_plug plug;
        int nfree = 0;

        /* read segment summary of victim */
        sum_page = get_sum_page(sbi, segno);

        blk_start_plug(&plug);

        sum = page_address(sum_page);

        /*
         * this is to avoid deadlock:
         * - lock_page(sum_page)         - f2fs_replace_block
         *  - check_valid_map()            - mutex_lock(sentry_lock)
         *   - mutex_lock(sentry_lock)     - change_curseg()
         *                                  - lock_page(sum_page)
         */
        unlock_page(sum_page);

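        /*
         * A segment holds either node or data blocks, so dispatch on the
         * summary footer type to the matching GC routine.
         */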
        switch (GET_SUM_TYPE((&sum->footer))) {
        case SUM_TYPE_NODE:
                nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
                break;
        case SUM_TYPE_DATA:
                nfree = gc_data_segment(sbi, sum->entries, gc_list,
                                                        segno, gc_type);
                break;
        }
        blk_finish_plug(&plug);

        stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
        stat_inc_call_count(sbi->stat_info);

        f2fs_put_page(sum_page, 0);
        return nfree;
}

int f2fs_gc(struct f2fs_sb_info *sbi)
{
        unsigned int segno = NULL_SEGNO;
        unsigned int i;
        int gc_type = BG_GC;
        int nfree = 0;
        int ret = -1;
        struct cp_control cpc;
        struct gc_inode_list gc_list = {
                .ilist = LIST_HEAD_INIT(gc_list.ilist),
                .iroot = RADIX_TREE_INIT(GFP_NOFS),
        };

        cpc.reason = __get_cp_reason(sbi);
gc_more:
        if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
                goto stop;
        if (unlikely(f2fs_cp_error(sbi)))
                goto stop;

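        /*
         * If this pass started as background GC but free sections are
         * running low, escalate to foreground GC and write a checkpoint
         * first so that prefree segments can be reclaimed as free space.
         */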
        if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
                gc_type = FG_GC;
                if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
                        write_checkpoint(sbi, &cpc);
        }

        if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
                goto stop;
        ret = 0;

        /* readahead multiple SSA blocks that have contiguous addresses */
        if (sbi->segs_per_sec > 1)
                ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
                                                                META_SSA);

        for (i = 0; i < sbi->segs_per_sec; i++)
                nfree += do_garbage_collect(sbi, segno + i, &gc_list, gc_type);

        if (gc_type == FG_GC)
                sbi->cur_victim_sec = NULL_SEGNO;

        if (has_not_enough_free_secs(sbi, nfree))
                goto gc_more;

        if (gc_type == FG_GC)
                write_checkpoint(sbi, &cpc);
stop:
        mutex_unlock(&sbi->gc_mutex);

        put_gc_inode(&gc_list);
        return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
        DIRTY_I(sbi)->v_ops = &default_v_ops;
}