fs/gfs2/log.c
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/gfs2_ondisk.h>
16 #include <linux/crc32.h>
17 #include <linux/delay.h>
18 #include <linux/kthread.h>
19 #include <linux/freezer.h>
20 #include <linux/bio.h>
21 #include <linux/blkdev.h>
22 #include <linux/writeback.h>
23 #include <linux/list_sort.h>
24
25 #include "gfs2.h"
26 #include "incore.h"
27 #include "bmap.h"
28 #include "glock.h"
29 #include "log.h"
30 #include "lops.h"
31 #include "meta_io.h"
32 #include "util.h"
33 #include "dir.h"
34 #include "trace_gfs2.h"
35
36 /**
37  * gfs2_struct2blk - compute the number of log descriptor blocks needed
38  * @sdp: the filesystem
39  * @nstruct: the number of structures
40  * @ssize: the size of the structures
41  *
42  * Compute the number of log descriptor blocks needed to hold a certain number
43  * of structures of a certain size.
44  *
45  * Returns: the number of blocks needed (minimum is always 1)
46  */
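/*
 * Illustrative example (hypothetical per-block capacities): if the first
 * block has room for 500 structures after its log descriptor and each
 * following block has room for 505 after its meta header, then
 * nstruct = 1200 needs 1 + DIV_ROUND_UP(1200 - 500, 505) = 3 blocks.
 */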
47
48 unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct,
49                              unsigned int ssize)
50 {
51         unsigned int blks;
52         unsigned int first, second;
53
54         blks = 1;
55         first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize;
56
57         if (nstruct > first) {
58                 second = (sdp->sd_sb.sb_bsize -
59                           sizeof(struct gfs2_meta_header)) / ssize;
60                 blks += DIV_ROUND_UP(nstruct - first, second);
61         }
62
63         return blks;
64 }
65
66 /**
67  * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
69  * @bd: The gfs2_bufdata to remove
70  *
71  * The ail lock _must_ be held when calling this function
72  *
73  */
74
75 void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
76 {
77         bd->bd_tr = NULL;
78         list_del_init(&bd->bd_ail_st_list);
79         list_del_init(&bd->bd_ail_gl_list);
80         atomic_dec(&bd->bd_gl->gl_ail_count);
81         brelse(bd->bd_bh);
82 }
83
84 /**
85  * gfs2_ail1_start_one - Start I/O on a part of the AIL
86  * @sdp: the filesystem
87  * @wbc: The writeback control structure
88  * @tr: The transaction whose dirty buffers should be written back
89  *
90  */
91
92 static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
93                                struct writeback_control *wbc,
94                                struct gfs2_trans *tr)
95 __releases(&sdp->sd_ail_lock)
96 __acquires(&sdp->sd_ail_lock)
97 {
98         struct gfs2_glock *gl = NULL;
99         struct address_space *mapping;
100         struct gfs2_bufdata *bd, *s;
101         struct buffer_head *bh;
102
103         list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
104                 bh = bd->bd_bh;
105
106                 gfs2_assert(sdp, bd->bd_tr == tr);
107
108                 if (!buffer_busy(bh)) {
109                         if (!buffer_uptodate(bh))
110                                 gfs2_io_error_bh(sdp, bh);
111                         list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
112                         continue;
113                 }
114
115                 if (!buffer_dirty(bh))
116                         continue;
117                 if (gl == bd->bd_gl)
118                         continue;
119                 gl = bd->bd_gl;
120                 list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
121                 mapping = bh->b_page->mapping;
122                 if (!mapping)
123                         continue;
124                 spin_unlock(&sdp->sd_ail_lock);
125                 generic_writepages(mapping, wbc);
126                 spin_lock(&sdp->sd_ail_lock);
127                 if (wbc->nr_to_write <= 0)
128                         break;
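                /*
                 * The ail lock was dropped for writeback above, so the
                 * caller must restart its scan of the AIL1 list.
                 */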
129                 return 1;
130         }
131
132         return 0;
133 }
134
135
136 /**
137  * gfs2_ail1_flush - start writeback of some ail1 entries 
138  * @sdp: The super block
139  * @wbc: The writeback control structure
140  *
141  * Writes back some ail1 entries, according to the limits in the
142  * writeback control structure
143  */
144
145 void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
146 {
147         struct list_head *head = &sdp->sd_ail1_list;
148         struct gfs2_trans *tr;
149         struct blk_plug plug;
150
151         trace_gfs2_ail_flush(sdp, wbc, 1);
152         blk_start_plug(&plug);
153         spin_lock(&sdp->sd_ail_lock);
154 restart:
155         list_for_each_entry_reverse(tr, head, tr_list) {
156                 if (wbc->nr_to_write <= 0)
157                         break;
158                 if (gfs2_ail1_start_one(sdp, wbc, tr))
159                         goto restart;
160         }
161         spin_unlock(&sdp->sd_ail_lock);
162         blk_finish_plug(&plug);
163         trace_gfs2_ail_flush(sdp, wbc, 0);
164 }
165
166 /**
167  * gfs2_ail1_start - start writeback of all ail1 entries
168  * @sdp: The superblock
169  */
170
171 static void gfs2_ail1_start(struct gfs2_sbd *sdp)
172 {
173         struct writeback_control wbc = {
174                 .sync_mode = WB_SYNC_NONE,
175                 .nr_to_write = LONG_MAX,
176                 .range_start = 0,
177                 .range_end = LLONG_MAX,
178         };
179
180         return gfs2_ail1_flush(sdp, &wbc);
181 }
182
183 /**
184  * gfs2_ail1_empty_one - Move a transaction's written-back buffers to its AIL2 list
185  * @sdp: the filesystem
186  * @tr: the transaction to check
187  *
188  */
189
190 static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
191 {
192         struct gfs2_bufdata *bd, *s;
193         struct buffer_head *bh;
194
195         list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
196                                          bd_ail_st_list) {
197                 bh = bd->bd_bh;
198                 gfs2_assert(sdp, bd->bd_tr == tr);
199                 if (buffer_busy(bh))
200                         continue;
201                 if (!buffer_uptodate(bh))
202                         gfs2_io_error_bh(sdp, bh);
203                 list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
204         }
205
206 }
207
208 /**
209  * gfs2_ail1_empty - Try to empty the ail1 lists
210  * @sdp: The superblock
211  *
212  * Tries to empty the ail1 lists, starting with the oldest first
213  */
214
215 static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
216 {
217         struct gfs2_trans *tr, *s;
218         int oldest_tr = 1;
219         int ret;
220
221         spin_lock(&sdp->sd_ail_lock);
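        /*
         * Scan oldest-first; a transaction is moved to the AIL2 list only
         * while every older transaction has already emptied, so the log
         * tail never advances past a transaction with unwritten buffers.
         */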
222         list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
223                 gfs2_ail1_empty_one(sdp, tr);
224                 if (list_empty(&tr->tr_ail1_list) && oldest_tr)
225                         list_move(&tr->tr_list, &sdp->sd_ail2_list);
226                 else
227                         oldest_tr = 0;
228         }
229         ret = list_empty(&sdp->sd_ail1_list);
230         spin_unlock(&sdp->sd_ail_lock);
231
232         return ret;
233 }
234
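/*
 * gfs2_ail1_wait - Wait for one in-flight AIL1 buffer
 *
 * Waits on the first locked buffer found on the AIL1 lists, then returns;
 * callers loop over gfs2_ail1_start(), gfs2_ail1_wait() and
 * gfs2_ail1_empty() until the AIL1 lists drain.
 */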
235 static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
236 {
237         struct gfs2_trans *tr;
238         struct gfs2_bufdata *bd;
239         struct buffer_head *bh;
240
241         spin_lock(&sdp->sd_ail_lock);
242         list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
243                 list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) {
244                         bh = bd->bd_bh;
245                         if (!buffer_locked(bh))
246                                 continue;
247                         get_bh(bh);
248                         spin_unlock(&sdp->sd_ail_lock);
249                         wait_on_buffer(bh);
250                         brelse(bh);
251                         return;
252                 }
253         }
254         spin_unlock(&sdp->sd_ail_lock);
255 }
256
257 /**
258  * gfs2_ail2_empty_one - Remove a transaction's buffers from its AIL2 list
259  * @sdp: the filesystem
260  * @tr: the transaction to empty
261  *
262  */
263
264 static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
265 {
266         struct list_head *head = &tr->tr_ail2_list;
267         struct gfs2_bufdata *bd;
268
269         while (!list_empty(head)) {
270                 bd = list_entry(head->prev, struct gfs2_bufdata,
271                                 bd_ail_st_list);
272                 gfs2_assert(sdp, bd->bd_tr == tr);
273                 gfs2_remove_from_ail(bd);
274         }
275 }
276
277 static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
278 {
279         struct gfs2_trans *tr, *safe;
280         unsigned int old_tail = sdp->sd_log_tail;
281         int wrap = (new_tail < old_tail);
282         int a, b, rm;
283
284         spin_lock(&sdp->sd_ail_lock);
285
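        /*
         * The journal is circular: a transaction is freed if its first
         * block lies in the half-open interval [old_tail, new_tail),
         * taking wraparound of the block numbers into account.
         */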
286         list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
287                 a = (old_tail <= tr->tr_first);
288                 b = (tr->tr_first < new_tail);
289                 rm = (wrap) ? (a || b) : (a && b);
290                 if (!rm)
291                         continue;
292
293                 gfs2_ail2_empty_one(sdp, tr);
294                 list_del(&tr->tr_list);
295                 gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
296                 gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
297                 kfree(tr);
298         }
299
300         spin_unlock(&sdp->sd_ail_lock);
301 }
302
303 /**
304  * gfs2_log_release - Release a given number of log blocks
305  * @sdp: The GFS2 superblock
306  * @blks: The number of blocks
307  *
308  */
309
310 void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
311 {
312
313         atomic_add(blks, &sdp->sd_log_blks_free);
314         trace_gfs2_log_blocks(sdp, blks);
315         gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
316                                   sdp->sd_jdesc->jd_blocks);
317         up_read(&sdp->sd_log_flush_lock);
318 }
319
320 /**
321  * gfs2_log_reserve - Make a log reservation
322  * @sdp: The GFS2 superblock
323  * @blks: The number of blocks to reserve
324  *
325  * Note that we never give out the last few blocks of the journal. That's
326  * due to the fact that there is a small number of header blocks
327  * associated with each log flush. The exact number can't be known until
328  * flush time, so we ensure that we have just enough free blocks at all
329  * times to avoid running out during a log flush.
330  *
331  * We no longer flush the log here, instead we wake up logd to do that
332  * for us. To avoid the thundering herd and to ensure that we deal fairly
333  * with queued waiters, we use an exclusive wait. This means that when we
334  * get woken with enough journal space to get our reservation, we need to
335  * wake the next waiter on the list.
336  *
337  * Returns: errno
338  */
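/*
 * Example: with a 4096-byte block size, reserved_blks below works out to
 * 7 * (4096 / 4096) = 7 blocks held back for flush headers; with 1024-byte
 * blocks it is 7 * (4096 / 1024) = 28 blocks.
 */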
339
340 int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
341 {
342         unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
343         unsigned wanted = blks + reserved_blks;
344         DEFINE_WAIT(wait);
345         int did_wait = 0;
346         unsigned int free_blocks;
347
348         if (gfs2_assert_warn(sdp, blks) ||
349             gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
350                 return -EINVAL;
351 retry:
352         free_blocks = atomic_read(&sdp->sd_log_blks_free);
353         if (unlikely(free_blocks <= wanted)) {
354                 do {
355                         prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
356                                         TASK_UNINTERRUPTIBLE);
357                         wake_up(&sdp->sd_logd_waitq);
358                         did_wait = 1;
359                         if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
360                                 io_schedule();
361                         free_blocks = atomic_read(&sdp->sd_log_blks_free);
362                 } while(free_blocks <= wanted);
363                 finish_wait(&sdp->sd_log_waitq, &wait);
364         }
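        /*
         * Claim the blocks locklessly: if sd_log_blks_free changed since
         * we sampled it, the cmpxchg fails and we go back and re-check.
         */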
365         if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
366                                 free_blocks - blks) != free_blocks)
367                 goto retry;
368         trace_gfs2_log_blocks(sdp, -blks);
369
370         /*
371          * If we waited, then so might others, wake them up _after_ we get
372          * our share of the log.
373          */
374         if (unlikely(did_wait))
375                 wake_up(&sdp->sd_log_waitq);
376
377         down_read(&sdp->sd_log_flush_lock);
378         if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
379                 gfs2_log_release(sdp, blks);
380                 return -EROFS;
381         }
382         return 0;
383 }
384
385 /**
386  * log_distance - Compute distance between two journal blocks
387  * @sdp: The GFS2 superblock
388  * @newer: The most recent journal block of the pair
389  * @older: The older journal block of the pair
390  *
391  *   Compute the distance (in the journal direction) between two
392  *   blocks in the journal
393  *
394  * Returns: the distance in blocks
395  */
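/*
 * Example: in a journal of 1000 blocks, the distance from older block 990
 * to newer block 10 is (10 - 990) + 1000 = 20 blocks.
 */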
396
397 static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
398                                         unsigned int older)
399 {
400         int dist;
401
402         dist = newer - older;
403         if (dist < 0)
404                 dist += sdp->sd_jdesc->jd_blocks;
405
406         return dist;
407 }
408
409 /**
410  * calc_reserved - Calculate the number of blocks to reserve when
411  *                 refunding a transaction's unused buffers.
412  * @sdp: The GFS2 superblock
413  *
414  * This is complex.  We need to reserve room for all our currently used
415  * metadata buffers (e.g. normal file I/O rewriting file time stamps) and 
416  * all our journaled data buffers for journaled files (e.g. files in the 
417  * meta_fs like rindex, or files for which chattr +j was done.)
418  * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush
419  * will count it as free space (sd_log_blks_free) and corruption will follow.
420  *
421  * We can have metadata bufs and jdata bufs in the same journal.  So each
422  * type gets its own log header, for which we need to reserve a block.
423  * In fact, each type has the potential for needing more than one header 
424  * in cases where we have more buffers than will fit on a journal page.
425  * Metadata journal entries take up half the space of journaled buffer entries.
426  * Thus, metadata entries have buf_limit (502) and journaled buffers have
427  * databuf_limit (251) before they cause a wrap around.
428  *
429  * Also, we need to reserve blocks for revoke journal entries and one for an
430  * overall header for the lot.
431  *
432  * Returns: the number of blocks reserved
433  */
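/*
 * Illustrative example, using the 4KB-block limits quoted above: a
 * transaction with 600 metadata buffers and 300 journaled data buffers
 * reserves 600 + 300 blocks for the buffers themselves, plus
 * DIV_ROUND_UP(600, 502) = 2 and DIV_ROUND_UP(300, 251) = 2 descriptor
 * blocks, plus one block for the overall header: 905 blocks in total
 * (before any revoke blocks).
 */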
434 static unsigned int calc_reserved(struct gfs2_sbd *sdp)
435 {
436         unsigned int reserved = 0;
437         unsigned int mbuf;
438         unsigned int dbuf;
439         struct gfs2_trans *tr = sdp->sd_log_tr;
440
441         if (tr) {
442                 mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
443                 dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
444                 reserved = mbuf + dbuf;
445                 /* Account for header blocks */
446                 reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
447                 reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
448         }
449
450         if (sdp->sd_log_commited_revoke > 0)
451                 reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
452                                           sizeof(u64));
453         /* One for the overall header */
454         if (reserved)
455                 reserved++;
456         return reserved;
457 }
458
459 static unsigned int current_tail(struct gfs2_sbd *sdp)
460 {
461         struct gfs2_trans *tr;
462         unsigned int tail;
463
464         spin_lock(&sdp->sd_ail_lock);
465
466         if (list_empty(&sdp->sd_ail1_list)) {
467                 tail = sdp->sd_log_head;
468         } else {
469                 tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
470                                 tr_list);
471                 tail = tr->tr_first;
472         }
473
474         spin_unlock(&sdp->sd_ail_lock);
475
476         return tail;
477 }
478
479 static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
480 {
481         unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
482
483         ail2_empty(sdp, new_tail);
484
485         atomic_add(dist, &sdp->sd_log_blks_free);
486         trace_gfs2_log_blocks(sdp, dist);
487         gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
488                              sdp->sd_jdesc->jd_blocks);
489
490         sdp->sd_log_tail = new_tail;
491 }
492
493
494 static void log_flush_wait(struct gfs2_sbd *sdp)
495 {
496         DEFINE_WAIT(wait);
497
498         if (atomic_read(&sdp->sd_log_in_flight)) {
499                 do {
500                         prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
501                                         TASK_UNINTERRUPTIBLE);
502                         if (atomic_read(&sdp->sd_log_in_flight))
503                                 io_schedule();
504                 } while(atomic_read(&sdp->sd_log_in_flight));
505                 finish_wait(&sdp->sd_log_flush_wait, &wait);
506         }
507 }
508
509 static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
510 {
511         struct gfs2_inode *ipa, *ipb;
512
513         ipa = list_entry(a, struct gfs2_inode, i_ordered);
514         ipb = list_entry(b, struct gfs2_inode, i_ordered);
515
516         if (ipa->i_no_addr < ipb->i_no_addr)
517                 return -1;
518         if (ipa->i_no_addr > ipb->i_no_addr)
519                 return 1;
520         return 0;
521 }
522
523 static void gfs2_ordered_write(struct gfs2_sbd *sdp)
524 {
525         struct gfs2_inode *ip;
526         LIST_HEAD(written);
527
528         spin_lock(&sdp->sd_ordered_lock);
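        /*
         * Sort the ordered-write inodes by disk address so that the
         * writeback below tends to be issued in on-disk order.
         */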
529         list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp);
530         while (!list_empty(&sdp->sd_log_le_ordered)) {
531                 ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
532                 list_move(&ip->i_ordered, &written);
533                 if (ip->i_inode.i_mapping->nrpages == 0)
534                         continue;
535                 spin_unlock(&sdp->sd_ordered_lock);
536                 filemap_fdatawrite(ip->i_inode.i_mapping);
537                 spin_lock(&sdp->sd_ordered_lock);
538         }
539         list_splice(&written, &sdp->sd_log_le_ordered);
540         spin_unlock(&sdp->sd_ordered_lock);
541 }
542
543 static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
544 {
545         struct gfs2_inode *ip;
546
547         spin_lock(&sdp->sd_ordered_lock);
548         while (!list_empty(&sdp->sd_log_le_ordered)) {
549                 ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
550                 list_del(&ip->i_ordered);
551                 WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
552                 if (ip->i_inode.i_mapping->nrpages == 0)
553                         continue;
554                 spin_unlock(&sdp->sd_ordered_lock);
555                 filemap_fdatawait(ip->i_inode.i_mapping);
556                 spin_lock(&sdp->sd_ordered_lock);
557         }
558         spin_unlock(&sdp->sd_ordered_lock);
559 }
560
561 void gfs2_ordered_del_inode(struct gfs2_inode *ip)
562 {
563         struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
564
565         spin_lock(&sdp->sd_ordered_lock);
566         if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
567                 list_del(&ip->i_ordered);
568         spin_unlock(&sdp->sd_ordered_lock);
569 }
570
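/*
 * Convert a journaled buffer into a revoke entry. A revoke tells journal
 * recovery not to replay any earlier copies of this block that are still
 * in the log.
 */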
571 void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
572 {
573         struct buffer_head *bh = bd->bd_bh;
574         struct gfs2_glock *gl = bd->bd_gl;
575
576         bh->b_private = NULL;
577         bd->bd_blkno = bh->b_blocknr;
578         gfs2_remove_from_ail(bd); /* drops ref on bh */
579         bd->bd_bh = NULL;
580         bd->bd_ops = &gfs2_revoke_lops;
581         sdp->sd_log_num_revoke++;
582         atomic_inc(&gl->gl_revokes);
583         set_bit(GLF_LFLUSH, &gl->gl_flags);
584         list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
585 }
586
587 void gfs2_write_revokes(struct gfs2_sbd *sdp)
588 {
589         struct gfs2_trans *tr;
590         struct gfs2_bufdata *bd, *tmp;
591         int have_revokes = 0;
592         int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
593
594         gfs2_ail1_empty(sdp);
595         spin_lock(&sdp->sd_ail_lock);
596         list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
597                 list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
598                         if (list_empty(&bd->bd_list)) {
599                                 have_revokes = 1;
600                                 goto done;
601                         }
602                 }
603         }
604 done:
605         spin_unlock(&sdp->sd_ail_lock);
606         if (have_revokes == 0)
607                 return;
608         while (sdp->sd_log_num_revoke > max_revokes)
609                 max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
610         max_revokes -= sdp->sd_log_num_revoke;
611         if (!sdp->sd_log_num_revoke) {
612                 atomic_dec(&sdp->sd_log_blks_free);
613                 /* If no blocks have been reserved, we need to also
614                  * reserve a block for the header */
615                 if (!sdp->sd_log_blks_reserved)
616                         atomic_dec(&sdp->sd_log_blks_free);
617         }
618         gfs2_log_lock(sdp);
619         spin_lock(&sdp->sd_ail_lock);
620         list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
621                 list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
622                         if (max_revokes == 0)
623                                 goto out_of_blocks;
624                         if (!list_empty(&bd->bd_list))
625                                 continue;
626                         gfs2_add_revoke(sdp, bd);
627                         max_revokes--;
628                 }
629         }
630 out_of_blocks:
631         spin_unlock(&sdp->sd_ail_lock);
632         gfs2_log_unlock(sdp);
633
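        /*
         * If no revokes were added after all, return the block(s) we
         * provisionally took from the free count above.
         */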
634         if (!sdp->sd_log_num_revoke) {
635                 atomic_inc(&sdp->sd_log_blks_free);
636                 if (!sdp->sd_log_blks_reserved)
637                         atomic_inc(&sdp->sd_log_blks_free);
638         }
639 }
640
641 /**
642  * log_write_header - Write a log header at the current flush head
643  * @sdp: The GFS2 superblock
644  * @flags: The log header flags (e.g. GFS2_LOG_HEAD_UNMOUNT)
645  *
646  */
647
648 static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
649 {
650         struct gfs2_log_header *lh;
651         unsigned int tail;
652         u32 hash;
653         int rw = WRITE_FLUSH_FUA | REQ_META;
654         struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
655         lh = page_address(page);
656         clear_page(lh);
657
658         tail = current_tail(sdp);
659
660         lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
661         lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
662         lh->lh_header.__pad0 = cpu_to_be64(0);
663         lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
664         lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
665         lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
666         lh->lh_flags = cpu_to_be32(flags);
667         lh->lh_tail = cpu_to_be32(tail);
668         lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
669         hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header));
670         lh->lh_hash = cpu_to_be32(hash);
671
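        /*
         * Without barrier support we cannot rely on FLUSH/FUA to order the
         * header against earlier writes, so explicitly wait for the ordered
         * data and in-flight log I/O before writing it.
         */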
672         if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
673                 gfs2_ordered_wait(sdp);
674                 log_flush_wait(sdp);
675                 rw = WRITE_SYNC | REQ_META | REQ_PRIO;
676         }
677
678         sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
679         gfs2_log_write_page(sdp, page);
680         gfs2_log_flush_bio(sdp, rw);
681         log_flush_wait(sdp);
682
683         if (sdp->sd_log_tail != tail)
684                 log_pull_tail(sdp, tail);
685 }
686
687 /**
688  * gfs2_log_flush - flush incore transaction(s)
689  * @sdp: the filesystem
690  * @gl: The glock structure to flush.  If NULL, flush the whole incore log
691  * @type: The type of log flush being performed
692  */
693
694 void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
695                     enum gfs2_flush_type type)
696 {
697         struct gfs2_trans *tr;
698
699         down_write(&sdp->sd_log_flush_lock);
700
701         /* Log might have been flushed while we waited for the flush lock */
702         if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
703                 up_write(&sdp->sd_log_flush_lock);
704                 return;
705         }
706         trace_gfs2_log_flush(sdp, 1);
707
708         sdp->sd_log_flush_head = sdp->sd_log_head;
709         sdp->sd_log_flush_wrapped = 0;
710         tr = sdp->sd_log_tr;
711         if (tr) {
712                 sdp->sd_log_tr = NULL;
713                 INIT_LIST_HEAD(&tr->tr_ail1_list);
714                 INIT_LIST_HEAD(&tr->tr_ail2_list);
715                 tr->tr_first = sdp->sd_log_flush_head;
716         }
717
718         gfs2_assert_withdraw(sdp,
719                         sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);
720
721         gfs2_ordered_write(sdp);
722         lops_before_commit(sdp, tr);
723         gfs2_log_flush_bio(sdp, WRITE);
724
725         if (sdp->sd_log_head != sdp->sd_log_flush_head) {
726                 log_flush_wait(sdp);
727                 log_write_header(sdp, 0);
728         } else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
729                 atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
730                 trace_gfs2_log_blocks(sdp, -1);
731                 log_write_header(sdp, 0);
732         }
733         lops_after_commit(sdp, tr);
734
735         gfs2_log_lock(sdp);
736         sdp->sd_log_head = sdp->sd_log_flush_head;
737         sdp->sd_log_blks_reserved = 0;
738         sdp->sd_log_commited_revoke = 0;
739
740         spin_lock(&sdp->sd_ail_lock);
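        /*
         * If the transaction still has buffers on its AIL1 list, hand it
         * over to the filesystem-wide AIL1 list and clear tr so it is not
         * freed at the end of this function.
         */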
741         if (tr && !list_empty(&tr->tr_ail1_list)) {
742                 list_add(&tr->tr_list, &sdp->sd_ail1_list);
743                 tr = NULL;
744         }
745         spin_unlock(&sdp->sd_ail_lock);
746         gfs2_log_unlock(sdp);
747
748         if (atomic_read(&sdp->sd_log_freeze))
749                 type = FREEZE_FLUSH;
750         if (type != NORMAL_FLUSH) {
751                 if (!sdp->sd_log_idle) {
752                         for (;;) {
753                                 gfs2_ail1_start(sdp);
754                                 gfs2_ail1_wait(sdp);
755                                 if (gfs2_ail1_empty(sdp))
756                                         break;
757                         }
758                         atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
759                         trace_gfs2_log_blocks(sdp, -1);
760                         sdp->sd_log_flush_wrapped = 0;
761                         log_write_header(sdp, 0);
762                         sdp->sd_log_head = sdp->sd_log_flush_head;
763                 }
764                 if (type == SHUTDOWN_FLUSH || type == FREEZE_FLUSH)
765                         gfs2_log_shutdown(sdp);
766                 if (type == FREEZE_FLUSH) {
767                         int error;
768
769                         atomic_set(&sdp->sd_log_freeze, 0);
770                         wake_up(&sdp->sd_log_frozen_wait);
771                         error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
772                                                    LM_ST_SHARED, 0,
773                                                    &sdp->sd_thaw_gh);
774                         if (error) {
775                                 printk(KERN_INFO "GFS2: couldn't get freeze lock: %d\n", error);
776                                 gfs2_assert_withdraw(sdp, 0);
777                         } else
779                                 gfs2_glock_dq_uninit(&sdp->sd_thaw_gh);
780                 }
781         }
782
783         trace_gfs2_log_flush(sdp, 0);
784         up_write(&sdp->sd_log_flush_lock);
785
786         kfree(tr);
787 }
788
789 /**
790  * gfs2_merge_trans - Merge a new transaction into a cached transaction
791  * @old: Original transaction to be expanded
792  * @new: New transaction to be merged
793  */
794
795 static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
796 {
797         WARN_ON_ONCE(old->tr_attached != 1);
798
799         old->tr_num_buf_new     += new->tr_num_buf_new;
800         old->tr_num_databuf_new += new->tr_num_databuf_new;
801         old->tr_num_buf_rm      += new->tr_num_buf_rm;
802         old->tr_num_databuf_rm  += new->tr_num_databuf_rm;
803         old->tr_num_revoke      += new->tr_num_revoke;
804         old->tr_num_revoke_rm   += new->tr_num_revoke_rm;
805
806         list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
807         list_splice_tail_init(&new->tr_buf, &old->tr_buf);
808 }
809
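/*
 * log_refund - Return a transaction's unused reserved blocks to the free
 * pool and merge (or attach) it into the current incore transaction.
 */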
810 static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
811 {
812         unsigned int reserved;
813         unsigned int unused;
814         unsigned int maxres;
815
816         gfs2_log_lock(sdp);
817
818         if (sdp->sd_log_tr) {
819                 gfs2_merge_trans(sdp->sd_log_tr, tr);
820         } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
821                 gfs2_assert_withdraw(sdp, tr->tr_alloced);
822                 sdp->sd_log_tr = tr;
823                 tr->tr_attached = 1;
824         }
825
826         sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm;
827         reserved = calc_reserved(sdp);
828         maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
829         gfs2_assert_withdraw(sdp, maxres >= reserved);
830         unused = maxres - reserved;
831         atomic_add(unused, &sdp->sd_log_blks_free);
832         trace_gfs2_log_blocks(sdp, unused);
833         gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
834                              sdp->sd_jdesc->jd_blocks);
835         sdp->sd_log_blks_reserved = reserved;
836
837         gfs2_log_unlock(sdp);
838 }
839
840 /**
841  * gfs2_log_commit - Commit a transaction to the log
842  * @sdp: the filesystem
843  * @tr: the transaction
844  *
845  * We wake up gfs2_logd if the number of pinned blocks exceed thresh1
846  * or the total number of used blocks (pinned blocks plus AIL blocks)
847  * is greater than thresh2.
848  *
849  * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of
850  * journal size.
851  *
853  */
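/*
 * Example: for a 32768-block journal, thresh1 is roughly 10922 blocks and
 * thresh2 roughly 21845, so logd is woken once about a third of the journal
 * is pinned or about two thirds of it is in use.
 */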
854
855 void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
856 {
857         log_refund(sdp, tr);
858
859         if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
860             ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
861             atomic_read(&sdp->sd_log_thresh2)))
862                 wake_up(&sdp->sd_logd_waitq);
863 }
864
865 /**
866  * gfs2_log_shutdown - write a shutdown header into a journal
867  * @sdp: the filesystem
868  *
869  */
870
871 void gfs2_log_shutdown(struct gfs2_sbd *sdp)
872 {
873         gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
874         gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
875         gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));
876
877         sdp->sd_log_flush_head = sdp->sd_log_head;
878         sdp->sd_log_flush_wrapped = 0;
879
880         log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT);
881
882         gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
883         gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
884
885         sdp->sd_log_head = sdp->sd_log_flush_head;
886         sdp->sd_log_tail = sdp->sd_log_head;
887 }
888
889 static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
890 {
891         return (atomic_read(&sdp->sd_log_pinned) >= atomic_read(&sdp->sd_log_thresh1) || atomic_read(&sdp->sd_log_freeze));
892 }
893
894 static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
895 {
896         unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
897         return used_blocks >= atomic_read(&sdp->sd_log_thresh2);
898 }
899
900 /**
901  * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
902  * @sdp: Pointer to GFS2 superblock
903  *
904  * Also, periodically check to make sure that we're using the most recent
905  * journal index.
906  */
907
908 int gfs2_logd(void *data)
909 {
910         struct gfs2_sbd *sdp = data;
911         unsigned long t = 1;
912         DEFINE_WAIT(wait);
913
914         while (!kthread_should_stop()) {
915
916                 if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
917                         gfs2_ail1_empty(sdp);
918                         gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
919                 }
920
921                 if (gfs2_ail_flush_reqd(sdp)) {
922                         gfs2_ail1_start(sdp);
923                         gfs2_ail1_wait(sdp);
924                         gfs2_ail1_empty(sdp);
925                         gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
926                 }
927
928                 if (!gfs2_ail_flush_reqd(sdp))
929                         wake_up(&sdp->sd_log_waitq);
930
931                 t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
932
933                 try_to_freeze();
934
935                 do {
936                         prepare_to_wait(&sdp->sd_logd_waitq, &wait,
937                                         TASK_INTERRUPTIBLE);
938                         if (!gfs2_ail_flush_reqd(sdp) &&
939                             !gfs2_jrnl_flush_reqd(sdp) &&
940                             !kthread_should_stop())
941                                 t = schedule_timeout(t);
942                 } while(t && !gfs2_ail_flush_reqd(sdp) &&
943                         !gfs2_jrnl_flush_reqd(sdp) &&
944                         !kthread_should_stop());
945                 finish_wait(&sdp->sd_logd_waitq, &wait);
946         }
947
948         return 0;
949 }
950