/*
 * linux/fs/jbd2/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/backing-dev.h>
#include <linux/bug.h>
#include <linux/module.h>

static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
static void __jbd2_journal_unfile_buffer(struct journal_head *jh);

static struct kmem_cache *transaction_cache;
int __init jbd2_journal_init_transaction_cache(void)
{
        J_ASSERT(!transaction_cache);
        transaction_cache = kmem_cache_create("jbd2_transaction_s",
                                        sizeof(transaction_t),
                                        0,
                                        SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
                                        NULL);
        if (transaction_cache)
                return 0;
        return -ENOMEM;
}

void jbd2_journal_destroy_transaction_cache(void)
{
        if (transaction_cache) {
                kmem_cache_destroy(transaction_cache);
                transaction_cache = NULL;
        }
}

void jbd2_journal_free_transaction(transaction_t *transaction)
{
        if (unlikely(ZERO_OR_NULL_PTR(transaction)))
                return;
        kmem_cache_free(transaction_cache, transaction);
}

/*
 * jbd2_get_transaction: obtain a new transaction_t object.
 *
 * Simply allocate and initialise a new transaction.  Create it in
 * RUNNING state and add it to the current journal (which should not
 * have an existing running transaction: we only make a new transaction
 * once we have started to commit the old one).
 *
 * Preconditions:
 *      The journal MUST be locked.  We don't perform atomic mallocs on the
 *      new transaction and we can't block without protecting against other
 *      processes trying to touch the journal while it is in transition.
 *
 */

static transaction_t *
jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
{
        transaction->t_journal = journal;
        transaction->t_state = T_RUNNING;
        transaction->t_start_time = ktime_get();
        transaction->t_tid = journal->j_transaction_sequence++;
        transaction->t_expires = jiffies + journal->j_commit_interval;
        spin_lock_init(&transaction->t_handle_lock);
        atomic_set(&transaction->t_updates, 0);
        atomic_set(&transaction->t_outstanding_credits, 0);
        atomic_set(&transaction->t_handle_count, 0);
        INIT_LIST_HEAD(&transaction->t_inode_list);
        INIT_LIST_HEAD(&transaction->t_private_list);

        /* Set up the commit timer for the new transaction. */
        journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
        add_timer(&journal->j_commit_timer);

        J_ASSERT(journal->j_running_transaction == NULL);
        journal->j_running_transaction = transaction;
        transaction->t_max_wait = 0;
        transaction->t_start = jiffies;

        return transaction;
}

/*
 * Handle management.
 *
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */

/*
 * Update transaction's maximum wait time, if debugging is enabled.
 *
 * In order for t_max_wait to be reliable, it must be protected by a
 * lock.  But doing so will mean that start_this_handle() can not be
 * run in parallel on SMP systems, which limits our scalability.  So
 * unless debugging is enabled, we no longer update t_max_wait, which
 * means that maximum wait time reported by the jbd2_run_stats
 * tracepoint will always be zero.
 */
static inline void update_t_max_wait(transaction_t *transaction,
                                     unsigned long ts)
{
#ifdef CONFIG_JBD2_DEBUG
        if (jbd2_journal_enable_debug &&
            time_after(transaction->t_start, ts)) {
                ts = jbd2_time_diff(ts, transaction->t_start);
                spin_lock(&transaction->t_handle_lock);
                if (ts > transaction->t_max_wait)
                        transaction->t_max_wait = ts;
                spin_unlock(&transaction->t_handle_lock);
        }
#endif
}

/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin.  Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */

static int start_this_handle(journal_t *journal, handle_t *handle,
                             gfp_t gfp_mask)
{
        transaction_t   *transaction, *new_transaction = NULL;
        tid_t           tid;
        int             needed, need_to_start;
        int             nblocks = handle->h_buffer_credits;
        unsigned long ts = jiffies;

        if (nblocks > journal->j_max_transaction_buffers) {
                printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n",
                       current->comm, nblocks,
                       journal->j_max_transaction_buffers);
                return -ENOSPC;
        }

alloc_transaction:
        if (!journal->j_running_transaction) {
                new_transaction = kmem_cache_zalloc(transaction_cache,
                                                    gfp_mask);
                if (!new_transaction) {
                        /*
                         * If __GFP_FS is not present, then we may be
                         * being called from inside the fs writeback
                         * layer, so we MUST NOT fail.  Since
                         * __GFP_NOFAIL is going away, we will arrange
                         * to retry the allocation ourselves.
                         */
                        if ((gfp_mask & __GFP_FS) == 0) {
                                congestion_wait(BLK_RW_ASYNC, HZ/50);
                                goto alloc_transaction;
                        }
                        return -ENOMEM;
                }
        }

        jbd_debug(3, "New handle %p going live.\n", handle);

        /*
         * We need to hold j_state_lock until t_updates has been incremented,
         * for proper journal barrier handling
         */
repeat:
        read_lock(&journal->j_state_lock);
        BUG_ON(journal->j_flags & JBD2_UNMOUNT);
        if (is_journal_aborted(journal) ||
            (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
                read_unlock(&journal->j_state_lock);
                jbd2_journal_free_transaction(new_transaction);
                return -EROFS;
        }

        /* Wait on the journal's transaction barrier if necessary */
        if (journal->j_barrier_count) {
                read_unlock(&journal->j_state_lock);
                wait_event(journal->j_wait_transaction_locked,
                                journal->j_barrier_count == 0);
                goto repeat;
        }

        if (!journal->j_running_transaction) {
                read_unlock(&journal->j_state_lock);
                if (!new_transaction)
                        goto alloc_transaction;
                write_lock(&journal->j_state_lock);
                if (!journal->j_running_transaction) {
                        jbd2_get_transaction(journal, new_transaction);
                        new_transaction = NULL;
                }
                write_unlock(&journal->j_state_lock);
                goto repeat;
        }

        transaction = journal->j_running_transaction;

        /*
         * If the current transaction is locked down for commit, wait for the
         * lock to be released.
         */
        if (transaction->t_state == T_LOCKED) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&journal->j_wait_transaction_locked,
                                        &wait, TASK_UNINTERRUPTIBLE);
                read_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_transaction_locked, &wait);
                goto repeat;
        }

        /*
         * If there is not enough space left in the log to write all potential
         * buffers requested by this operation, we need to stall pending a log
         * checkpoint to free some more log space.
         */
        needed = atomic_add_return(nblocks,
                                   &transaction->t_outstanding_credits);

        if (needed > journal->j_max_transaction_buffers) {
                /*
                 * If the current transaction is already too large, then start
                 * to commit it: we can then go back and attach this handle to
                 * a new transaction.
                 */
                DEFINE_WAIT(wait);

                jbd_debug(2, "Handle %p starting new commit...\n", handle);
                atomic_sub(nblocks, &transaction->t_outstanding_credits);
                prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
                                TASK_UNINTERRUPTIBLE);
                tid = transaction->t_tid;
                need_to_start = !tid_geq(journal->j_commit_request, tid);
                read_unlock(&journal->j_state_lock);
                if (need_to_start)
                        jbd2_log_start_commit(journal, tid);
                schedule();
                finish_wait(&journal->j_wait_transaction_locked, &wait);
                goto repeat;
        }

        /*
         * The commit code assumes that it can get enough log space
         * without forcing a checkpoint.  This is *critical* for
         * correctness: a checkpoint of a buffer which is also
         * associated with a committing transaction creates a deadlock,
         * so commit simply cannot force through checkpoints.
         *
         * We must therefore ensure the necessary space in the journal
         * *before* starting to dirty potentially checkpointed buffers
         * in the new transaction.
         *
         * The worst part is, any transaction currently committing can
         * reduce the free space arbitrarily.  Be careful to account for
         * those buffers when checkpointing.
         */

        /*
         * @@@ AKPM: This seems rather over-defensive.  We're giving commit
         * a _lot_ of headroom: 1/4 of the journal plus the size of
         * the committing transaction.  Really, we only need to give it
         * committing_transaction->t_outstanding_credits plus "enough" for
         * the log control blocks.
         * Also, this test is inconsistent with the matching one in
         * jbd2_journal_extend().
         */
        if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
                jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
                atomic_sub(nblocks, &transaction->t_outstanding_credits);
                read_unlock(&journal->j_state_lock);
                write_lock(&journal->j_state_lock);
                if (__jbd2_log_space_left(journal) < jbd_space_needed(journal))
                        __jbd2_log_wait_for_space(journal);
                write_unlock(&journal->j_state_lock);
                goto repeat;
        }

        /* OK, account for the buffers that this operation expects to
         * use and add the handle to the running transaction.
         */
        update_t_max_wait(transaction, ts);
        handle->h_transaction = transaction;
        atomic_inc(&transaction->t_updates);
        atomic_inc(&transaction->t_handle_count);
        jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
                  handle, nblocks,
                  atomic_read(&transaction->t_outstanding_credits),
                  __jbd2_log_space_left(journal));
        read_unlock(&journal->j_state_lock);

        lock_map_acquire(&handle->h_lockdep_map);
        jbd2_journal_free_transaction(new_transaction);
        return 0;
}

static struct lock_class_key jbd2_handle_key;

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
        handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
        if (!handle)
                return NULL;
        memset(handle, 0, sizeof(*handle));
        handle->h_buffer_credits = nblocks;
        handle->h_ref = 1;

        lockdep_init_map(&handle->h_lockdep_map, "jbd2_handle",
                                                &jbd2_handle_key, 0);

        return handle;
}

/**
 * handle_t *jbd2_journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffers we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log.  We block until the log can guarantee
 * that much space.
 *
 * This function is visible to journal users (like ext3fs), so is not
 * called with the journal already locked.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask)
{
        handle_t *handle = journal_current_handle();
        int err;

        if (!journal)
                return ERR_PTR(-EROFS);

        if (handle) {
                J_ASSERT(handle->h_transaction->t_journal == journal);
                handle->h_ref++;
                return handle;
        }

        handle = new_handle(nblocks);
        if (!handle)
                return ERR_PTR(-ENOMEM);

        current->journal_info = handle;

        err = start_this_handle(journal, handle, gfp_mask);
        if (err < 0) {
                jbd2_free_handle(handle);
                current->journal_info = NULL;
                handle = ERR_PTR(err);
        }
        return handle;
}
EXPORT_SYMBOL(jbd2__journal_start);


handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
        return jbd2__journal_start(journal, nblocks, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_start);
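
/*
 * Example (illustrative sketch, not taken from any caller in this tree):
 * a typical journal user brackets a set of metadata updates between
 * jbd2_journal_start() and jbd2_journal_stop().  "journal" and "bh"
 * below are assumed to be supplied by the caller.
 *
 *    handle_t *handle = jbd2_journal_start(journal, 1);
 *    if (IS_ERR(handle))
 *            return PTR_ERR(handle);
 *    err = jbd2_journal_get_write_access(handle, bh);
 *    if (!err) {
 *            ... modify the metadata in bh->b_data ...
 *            err = jbd2_journal_dirty_metadata(handle, bh);
 *    }
 *    jbd2_journal_stop(handle);
 */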


/**
 * int jbd2_journal_extend() - extend buffer credits.
 * @handle:  handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages.  The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * jbd2_journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee the allocation - this is best-effort only.
 * The calling process MUST be able to deal cleanly with a failure to
 * extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int jbd2_journal_extend(handle_t *handle, int nblocks)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        int result;
        int wanted;

        result = -EIO;
        if (is_handle_aborted(handle))
                goto out;

        result = 1;

        read_lock(&journal->j_state_lock);

        /* Don't extend a locked-down transaction! */
        if (handle->h_transaction->t_state != T_RUNNING) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "transaction not running\n", handle, nblocks);
                goto error_out;
        }

        spin_lock(&transaction->t_handle_lock);
        wanted = atomic_read(&transaction->t_outstanding_credits) + nblocks;

        if (wanted > journal->j_max_transaction_buffers) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "transaction too large\n", handle, nblocks);
                goto unlock;
        }

        if (wanted > __jbd2_log_space_left(journal)) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "insufficient log space\n", handle, nblocks);
                goto unlock;
        }

        handle->h_buffer_credits += nblocks;
        atomic_add(nblocks, &transaction->t_outstanding_credits);
        result = 0;

        jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
        spin_unlock(&transaction->t_handle_lock);
error_out:
        read_unlock(&journal->j_state_lock);
out:
        return result;
}
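
/*
 * Example (illustrative sketch; "handle" and "needed" come from the
 * caller): since extension is best-effort, callers usually fall back to
 * jbd2_journal_restart() when jbd2_journal_extend() returns non-zero.
 *
 *    err = jbd2_journal_extend(handle, needed);
 *    if (err > 0)
 *            err = jbd2_journal_restart(handle, needed);
 *    if (err)
 *            goto fail;
 *
 * Note that after a restart the handle is attached to a new transaction,
 * so atomicity against updates made under the old transaction is lost.
 */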


/**
 * int jbd2_journal_restart() - restart a handle.
 * @handle:  handle to restart
 * @nblocks: nr credits requested
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the jbd2_journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to jbd2_journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits.
 */
int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        tid_t           tid;
        int             need_to_start, ret;

        /* If we've had an abort of any type, don't even think about
         * actually doing the restart! */
        if (is_handle_aborted(handle))
                return 0;

        /*
         * First unlink the handle from its current transaction, and start the
         * commit on that.
         */
        J_ASSERT(atomic_read(&transaction->t_updates) > 0);
        J_ASSERT(journal_current_handle() == handle);

        read_lock(&journal->j_state_lock);
        spin_lock(&transaction->t_handle_lock);
        atomic_sub(handle->h_buffer_credits,
                   &transaction->t_outstanding_credits);
        if (atomic_dec_and_test(&transaction->t_updates))
                wake_up(&journal->j_wait_updates);
        spin_unlock(&transaction->t_handle_lock);

        jbd_debug(2, "restarting handle %p\n", handle);
        tid = transaction->t_tid;
        need_to_start = !tid_geq(journal->j_commit_request, tid);
        read_unlock(&journal->j_state_lock);
        if (need_to_start)
                jbd2_log_start_commit(journal, tid);

        lock_map_release(&handle->h_lockdep_map);
        handle->h_buffer_credits = nblocks;
        ret = start_this_handle(journal, handle, gfp_mask);
        return ret;
}
EXPORT_SYMBOL(jbd2__journal_restart);


int jbd2_journal_restart(handle_t *handle, int nblocks)
{
        return jbd2__journal_restart(handle, nblocks, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_restart);

/**
 * void jbd2_journal_lock_updates() - establish a transaction barrier.
 * @journal:  Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void jbd2_journal_lock_updates(journal_t *journal)
{
        DEFINE_WAIT(wait);

        write_lock(&journal->j_state_lock);
        ++journal->j_barrier_count;

        /* Wait until there are no running updates */
        while (1) {
                transaction_t *transaction = journal->j_running_transaction;

                if (!transaction)
                        break;

                spin_lock(&transaction->t_handle_lock);
                prepare_to_wait(&journal->j_wait_updates, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (!atomic_read(&transaction->t_updates)) {
                        spin_unlock(&transaction->t_handle_lock);
                        finish_wait(&journal->j_wait_updates, &wait);
                        break;
                }
                spin_unlock(&transaction->t_handle_lock);
                write_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_updates, &wait);
                write_lock(&journal->j_state_lock);
        }
        write_unlock(&journal->j_state_lock);

        /*
         * We have now established a barrier against other normal updates, but
         * we also need to barrier against other jbd2_journal_lock_updates() calls
         * to make sure that we serialise special journal-locked operations
         * too.
         */
        mutex_lock(&journal->j_barrier);
}

/**
 * void jbd2_journal_unlock_updates() - release barrier
 * @journal:  Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void jbd2_journal_unlock_updates(journal_t *journal)
{
        J_ASSERT(journal->j_barrier_count != 0);

        mutex_unlock(&journal->j_barrier);
        write_lock(&journal->j_state_lock);
        --journal->j_barrier_count;
        write_unlock(&journal->j_state_lock);
        wake_up(&journal->j_wait_transaction_locked);
}
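
/*
 * Example (illustrative sketch): the lock/unlock pair brackets operations
 * that need a quiescent journal; the surrounding filesystem operation
 * here is hypothetical.
 *
 *    jbd2_journal_lock_updates(journal);
 *    ... no new handles can start and no updates are running ...
 *    jbd2_journal_unlock_updates(journal);
 *
 * The j_barrier mutex taken above also serialises this against other
 * jbd2_journal_lock_updates() callers.
 */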

static void warn_dirty_buffer(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];

        printk(KERN_WARNING
               "JBD2: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
               "There's a risk of filesystem corruption in case of system "
               "crash.\n",
               bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
}

/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do.  If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk.  We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 *
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
                        int force_copy)
{
        struct buffer_head *bh;
        transaction_t *transaction;
        journal_t *journal;
        int error;
        char *frozen_buffer = NULL;
        int need_copy = 0;

        if (is_handle_aborted(handle))
                return -EROFS;

        transaction = handle->h_transaction;
        journal = transaction->t_journal;

        jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);

        JBUFFER_TRACE(jh, "entry");
repeat:
        bh = jh2bh(jh);

        /* @@@ Need to check for errors here at some point. */

        lock_buffer(bh);
        jbd_lock_bh_state(bh);

        /* We now hold the buffer lock so it is safe to query the buffer
         * state.  Is the buffer dirty?
         *
         * If so, there are two possibilities.  The buffer may be
         * non-journaled, and undergoing a quite legitimate writeback.
         * Otherwise, it is journaled, and we don't expect dirty buffers
         * in that state (the buffers should be marked JBD_Dirty
         * instead.)  So either the IO is being done under our own
         * control and this is a bug, or it's a third party IO such as
         * dump(8) (which may leave the buffer scheduled for read ---
         * ie. locked but not dirty) or tune2fs (which may actually have
         * the buffer dirtied, ugh.)  */

        if (buffer_dirty(bh)) {
                /*
                 * First question: is this buffer already part of the current
                 * transaction or the existing committing transaction?
                 */
                if (jh->b_transaction) {
                        J_ASSERT_JH(jh,
                                jh->b_transaction == transaction ||
                                jh->b_transaction ==
                                        journal->j_committing_transaction);
                        if (jh->b_next_transaction)
                                J_ASSERT_JH(jh, jh->b_next_transaction ==
                                                        transaction);
                        warn_dirty_buffer(bh);
                }
                /*
                 * In any case we need to clean the dirty flag and we must
                 * do it under the buffer lock to be sure we don't race
                 * with running write-out.
                 */
                JBUFFER_TRACE(jh, "Journalling dirty buffer");
                clear_buffer_dirty(bh);
                set_buffer_jbddirty(bh);
        }

        unlock_buffer(bh);

        error = -EROFS;
        if (is_handle_aborted(handle)) {
                jbd_unlock_bh_state(bh);
                goto out;
        }
        error = 0;

        /*
         * The buffer is already part of this transaction if b_transaction or
         * b_next_transaction points to it
         */
        if (jh->b_transaction == transaction ||
            jh->b_next_transaction == transaction)
                goto done;

        /*
         * this is the first time this transaction is touching this buffer,
         * reset the modified flag
         */
        jh->b_modified = 0;

        /*
         * If there is already a copy-out version of this buffer, then we don't
         * need to make another one
         */
        if (jh->b_frozen_data) {
                JBUFFER_TRACE(jh, "has frozen data");
                J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
                jh->b_next_transaction = transaction;
                goto done;
        }

        /* Is there data here we need to preserve? */

        if (jh->b_transaction && jh->b_transaction != transaction) {
                JBUFFER_TRACE(jh, "owned by older transaction");
                J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
                J_ASSERT_JH(jh, jh->b_transaction ==
                                        journal->j_committing_transaction);

                /* There is one case we have to be very careful about.
                 * If the committing transaction is currently writing
                 * this buffer out to disk and has NOT made a copy-out,
                 * then we cannot modify the buffer contents at all
                 * right now.  The essence of copy-out is that it is the
                 * extra copy, not the primary copy, which gets
                 * journaled.  If the primary copy is already going to
                 * disk then we cannot do copy-out here. */

                if (jh->b_jlist == BJ_Shadow) {
                        DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
                        wait_queue_head_t *wqh;

                        wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);

                        JBUFFER_TRACE(jh, "on shadow: sleep");
                        jbd_unlock_bh_state(bh);
                        /* commit wakes up all shadow buffers after IO */
                        for ( ; ; ) {
                                prepare_to_wait(wqh, &wait.wait,
                                                TASK_UNINTERRUPTIBLE);
                                if (jh->b_jlist != BJ_Shadow)
                                        break;
                                schedule();
                        }
                        finish_wait(wqh, &wait.wait);
                        goto repeat;
                }

                /* Only do the copy if the currently-owning transaction
                 * still needs it.  If it is on the Forget list, the
                 * committing transaction is past that stage.  The
                 * buffer had better remain locked during the kmalloc,
                 * but that should be true --- we hold the journal lock
                 * still and the buffer is already on the BUF_JOURNAL
                 * list so won't be flushed.
                 *
                 * Subtle point, though: if this is a get_undo_access,
                 * then we will be relying on the frozen_data to contain
                 * the new value of the committed_data record after the
                 * transaction, so we HAVE to force the frozen_data copy
                 * in that case. */

                if (jh->b_jlist != BJ_Forget || force_copy) {
                        JBUFFER_TRACE(jh, "generate frozen data");
                        if (!frozen_buffer) {
                                JBUFFER_TRACE(jh, "allocate memory for buffer");
                                jbd_unlock_bh_state(bh);
                                frozen_buffer =
                                        jbd2_alloc(jh2bh(jh)->b_size,
                                                         GFP_NOFS);
                                if (!frozen_buffer) {
                                        printk(KERN_EMERG
                                               "%s: OOM for frozen_buffer\n",
                                               __func__);
                                        JBUFFER_TRACE(jh, "oom!");
                                        error = -ENOMEM;
                                        jbd_lock_bh_state(bh);
                                        goto done;
                                }
                                goto repeat;
                        }
                        jh->b_frozen_data = frozen_buffer;
                        frozen_buffer = NULL;
                        need_copy = 1;
                }
                jh->b_next_transaction = transaction;
        }


        /*
         * Finally, if the buffer is not journaled right now, we need to make
         * sure it doesn't get written to disk before the caller actually
         * commits the new data
         */
        if (!jh->b_transaction) {
                JBUFFER_TRACE(jh, "no transaction");
                J_ASSERT_JH(jh, !jh->b_next_transaction);
                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                spin_lock(&journal->j_list_lock);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
                spin_unlock(&journal->j_list_lock);
        }

done:
        if (need_copy) {
                struct page *page;
                int offset;
                char *source;

                J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
                            "Possible IO failure.\n");
                page = jh2bh(jh)->b_page;
                offset = offset_in_page(jh2bh(jh)->b_data);
                source = kmap_atomic(page);
                /* Fire data frozen trigger just before we copy the data */
                jbd2_buffer_frozen_trigger(jh, source + offset,
                                           jh->b_triggers);
                memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
                kunmap_atomic(source);

                /*
                 * Now that the frozen data is saved off, we need to store
                 * any matching triggers.
                 */
                jh->b_frozen_triggers = jh->b_triggers;
        }
        jbd_unlock_bh_state(bh);

        /*
         * If we are about to journal a buffer, then any revoke pending on it is
         * no longer valid
         */
        jbd2_journal_cancel_revoke(handle, jh);

out:
        if (unlikely(frozen_buffer))    /* It's usually NULL */
                jbd2_free(frozen_buffer, bh->b_size);

        JBUFFER_TRACE(jh, "exit");
        return error;
}

/**
 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh:     bh to be used for metadata writes
 *
 * Returns an error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're write()ing a buffer which is also part of a shared mapping.
 */

int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        int rc;

        /* We do not want to get caught playing with fields which the
         * log thread also manipulates.  Make sure that the buffer
         * completes any outstanding IO before proceeding. */
        rc = do_get_write_access(handle, jh, 0);
        jbd2_journal_put_journal_head(jh);
        return rc;
}
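
/*
 * Example (illustrative sketch; "handle" and "bh" are the caller's):
 * write access must be declared *before* the buffer is modified, and the
 * change only enters the journal once jbd2_journal_dirty_metadata() is
 * called on the same buffer.
 *
 *    err = jbd2_journal_get_write_access(handle, bh);
 *    if (err)
 *            goto fail;
 *    ... update the metadata in bh->b_data ...
 *    err = jbd2_journal_dirty_metadata(handle, bh);
 */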


/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * int jbd2_journal_get_create_access() - notify intent to use newly created bh
 * @handle: transaction to add the new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        int err;

        jbd_debug(5, "journal_head %p\n", jh);
        err = -EROFS;
        if (is_handle_aborted(handle))
                goto out;
        err = 0;

        JBUFFER_TRACE(jh, "entry");
        /*
         * The buffer may already belong to this transaction due to pre-zeroing
         * in the filesystem's new_block code.  It may also be on the previous,
         * committing transaction's lists, but it HAS to be in Forget state in
         * that case: the transaction must have deleted the buffer for it to be
         * reused here.
         */
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);
        J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
                jh->b_transaction == NULL ||
                (jh->b_transaction == journal->j_committing_transaction &&
                          jh->b_jlist == BJ_Forget)));

        J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
        J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

        if (jh->b_transaction == NULL) {
                /*
                 * Previous jbd2_journal_forget() could have left the buffer
                 * with jbddirty bit set because it was being committed. When
                 * the commit finished, we've filed the buffer for
                 * checkpointing and marked it dirty. Now we are reallocating
                 * the buffer so the transaction freeing it must have
                 * committed and so it's safe to clear the dirty bit.
                 */
                clear_buffer_dirty(jh2bh(jh));
                /* first access by this transaction */
                jh->b_modified = 0;

                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
        } else if (jh->b_transaction == journal->j_committing_transaction) {
                /* first access by this transaction */
                jh->b_modified = 0;

                JBUFFER_TRACE(jh, "set next transaction");
                jh->b_next_transaction = transaction;
        }
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);

        /*
         * akpm: I added this.  ext3_alloc_branch can pick up new indirect
         * blocks which contain freed but then revoked metadata.  We need
         * to cancel the revoke in case we end up freeing it yet again
         * and then reallocating it as data - this would cause a second revoke,
         * which hits an assertion error.
         */
        JBUFFER_TRACE(jh, "cancelling revoke");
        jbd2_journal_cancel_revoke(handle, jh);
out:
        jbd2_journal_put_journal_head(jh);
        return err;
}
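
/*
 * Example (illustrative sketch; "sb" and "blocknr" are assumptions): a
 * freshly allocated metadata block is obtained with getblk(), declared
 * to the journal while still locked, filled in, and then dirtied.
 *
 *    bh = sb_getblk(sb, blocknr);
 *    if (unlikely(!bh))
 *            return -ENOMEM;
 *    lock_buffer(bh);
 *    err = jbd2_journal_get_create_access(handle, bh);
 *    if (!err) {
 *            memset(bh->b_data, 0, bh->b_size);
 *            set_buffer_uptodate(bh);
 *    }
 *    unlock_buffer(bh);
 *    if (!err)
 *            err = jbd2_journal_dirty_metadata(handle, bh);
 */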

/**
 * int jbd2_journal_get_undo_access() - Notify intent to modify metadata with
 *     non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space, we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, jbd2_journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
        int err;
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        char *committed_data = NULL;

        JBUFFER_TRACE(jh, "entry");

        /*
         * Do this first --- it can drop the journal lock, so we want to
         * make sure that obtaining the committed_data is done
         * atomically wrt. completion of any outstanding commits.
         */
        err = do_get_write_access(handle, jh, 1);
        if (err)
                goto out;

repeat:
        if (!jh->b_committed_data) {
                committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
                if (!committed_data) {
                        printk(KERN_EMERG "%s: No memory for committed data\n",
                                __func__);
                        err = -ENOMEM;
                        goto out;
                }
        }

        jbd_lock_bh_state(bh);
        if (!jh->b_committed_data) {
                /* Copy out the current buffer contents into the
                 * preserved, committed copy. */
                JBUFFER_TRACE(jh, "generate b_committed data");
                if (!committed_data) {
                        jbd_unlock_bh_state(bh);
                        goto repeat;
                }

                jh->b_committed_data = committed_data;
                committed_data = NULL;
                memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
        }
        jbd_unlock_bh_state(bh);
out:
        jbd2_journal_put_journal_head(jh);
        if (unlikely(committed_data))
                jbd2_free(committed_data, bh->b_size);
        return err;
}
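
/*
 * Example (illustrative sketch): the classic caller is bitmap
 * manipulation in the owning filesystem; the bitmap update below is a
 * hypothetical stand-in.
 *
 *    err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *    if (err)
 *            return err;
 *    ... clear the allocation bit in bitmap_bh->b_data ...
 *    err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 *
 * The b_committed_data copy preserved above records which bits were set
 * on disk, so freed blocks are not handed out again before the
 * deallocation commits.
 */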

/**
 * void jbd2_journal_set_triggers() - Add triggers for commit writeout
 * @bh: buffer to trigger on
 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
 *
 * Set any triggers on this journal_head.  This is always safe, because
 * triggers for a committing buffer will be saved off, and triggers for
 * a running transaction will match the buffer in that transaction.
 *
 * Call with NULL to clear the triggers.
 */
void jbd2_journal_set_triggers(struct buffer_head *bh,
                               struct jbd2_buffer_trigger_type *type)
{
        struct journal_head *jh = bh2jh(bh);

        jh->b_triggers = type;
}
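
/*
 * Example (illustrative sketch; the structure name and callbacks are
 * hypothetical): a client fills in a jbd2_buffer_trigger_type and
 * attaches it before journaling the buffer; t_frozen then runs on the
 * frozen copy that will go to the journal.
 *
 *    static struct jbd2_buffer_trigger_type my_triggers = {
 *            .t_frozen = my_frozen_callback,
 *            .t_abort  = my_abort_callback,
 *    };
 *
 *    jbd2_journal_set_triggers(bh, &my_triggers);
 *
 * OCFS2, for instance, uses triggers to recompute metadata checksums at
 * commit time.
 */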

void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
                                struct jbd2_buffer_trigger_type *triggers)
{
        struct buffer_head *bh = jh2bh(jh);

        if (!triggers || !triggers->t_frozen)
                return;

        triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
}

void jbd2_buffer_abort_trigger(struct journal_head *jh,
                               struct jbd2_buffer_trigger_type *triggers)
{
        if (!triggers || !triggers->t_abort)
                return;

        triggers->t_abort(triggers, jh2bh(jh));
}



/**
 * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer must have previously had jbd2_journal_get_write_access()
 * called so that it has a valid journal_head attached to the buffer
 * head.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        struct journal_head *jh = bh2jh(bh);
        int ret = 0;

        jbd_debug(5, "journal_head %p\n", jh);
        JBUFFER_TRACE(jh, "entry");
        if (is_handle_aborted(handle))
                goto out;
        if (!buffer_jbd(bh)) {
                ret = -EUCLEAN;
                goto out;
        }

        jbd_lock_bh_state(bh);

        if (jh->b_modified == 0) {
                /*
                 * This buffer has been modified and is becoming part
                 * of the transaction. This needs to be done
                 * once per transaction -bzzz
                 */
                jh->b_modified = 1;
                J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
                handle->h_buffer_credits--;
        }

        /*
         * fastpath, to avoid expensive locking.  If this buffer is already
         * on the running transaction's metadata list there is nothing to do.
         * Nobody can take it off again because there is a handle open.
         * I _think_ we're OK here with SMP barriers - a mistaken decision will
         * result in this test being false, so we go in and take the locks.
         */
        if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
                JBUFFER_TRACE(jh, "fastpath");
                if (unlikely(jh->b_transaction !=
                             journal->j_running_transaction)) {
                        printk(KERN_EMERG "JBD2: %s: "
                               "jh->b_transaction (%llu, %p, %u) != "
                               "journal->j_running_transaction (%p, %u)\n",
                               journal->j_devname,
                               (unsigned long long) bh->b_blocknr,
                               jh->b_transaction,
                               jh->b_transaction ? jh->b_transaction->t_tid : 0,
                               journal->j_running_transaction,
                               journal->j_running_transaction ?
                               journal->j_running_transaction->t_tid : 0);
                        ret = -EINVAL;
                }
                goto out_unlock_bh;
        }

        set_buffer_jbddirty(bh);

        /*
         * Metadata already on the current transaction list doesn't
         * need to be filed.  Metadata on another transaction's list must
         * be committing, and will be refiled once the commit completes:
         * leave it alone for now.
         */
        if (jh->b_transaction != transaction) {
                JBUFFER_TRACE(jh, "already on other transaction");
                if (unlikely(jh->b_transaction !=
                             journal->j_committing_transaction)) {
                        printk(KERN_EMERG "JBD2: %s: "
                               "jh->b_transaction (%llu, %p, %u) != "
                               "journal->j_committing_transaction (%p, %u)\n",
                               journal->j_devname,
                               (unsigned long long) bh->b_blocknr,
                               jh->b_transaction,
                               jh->b_transaction ? jh->b_transaction->t_tid : 0,
                               journal->j_committing_transaction,
                               journal->j_committing_transaction ?
                               journal->j_committing_transaction->t_tid : 0);
                        ret = -EINVAL;
                }
                if (unlikely(jh->b_next_transaction != transaction)) {
                        printk(KERN_EMERG "JBD2: %s: "
                               "jh->b_next_transaction (%llu, %p, %u) != "
                               "transaction (%p, %u)\n",
                               journal->j_devname,
                               (unsigned long long) bh->b_blocknr,
                               jh->b_next_transaction,
                               jh->b_next_transaction ?
                               jh->b_next_transaction->t_tid : 0,
                               transaction, transaction->t_tid);
                        ret = -EINVAL;
                }
                /* And this case is illegal: we can't reuse another
                 * transaction's data buffer, ever. */
                goto out_unlock_bh;
        }

        /* That test should have eliminated the following case: */
        J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

        JBUFFER_TRACE(jh, "file as BJ_Metadata");
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
        spin_unlock(&journal->j_list_lock);
out_unlock_bh:
        jbd_unlock_bh_state(bh);
out:
        JBUFFER_TRACE(jh, "exit");
        WARN_ON(ret);   /* All errors are bugs, so dump the stack */
        return ret;
}

/**
 * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh:     bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer.  If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journalled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable.  Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        struct journal_head *jh;
        int drop_reserve = 0;
        int err = 0;
        int was_modified = 0;

        BUFFER_TRACE(bh, "entry");

        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);

        if (!buffer_jbd(bh))
                goto not_jbd;
        jh = bh2jh(bh);

        /* Critical error: attempting to delete a bitmap buffer, maybe?
         * Don't do any jbd operations, and return an error. */
        if (!J_EXPECT_JH(jh, !jh->b_committed_data,
                         "inconsistent data on disk")) {
                err = -EIO;
                goto not_jbd;
        }

        /* keep track of whether or not this transaction modified us */
        was_modified = jh->b_modified;

        /*
         * The buffer's going from the transaction, we must drop
         * all references -bzzz
         */
        jh->b_modified = 0;

        if (jh->b_transaction == handle->h_transaction) {
                J_ASSERT_JH(jh, !jh->b_frozen_data);

                /* If we are forgetting a buffer which is already part
                 * of this transaction, then we can just drop it from
                 * the transaction immediately. */
                clear_buffer_dirty(bh);
                clear_buffer_jbddirty(bh);

                JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

                /*
                 * we only want to drop a reference if this transaction
                 * modified the buffer
                 */
                if (was_modified)
                        drop_reserve = 1;

                /*
                 * We are no longer going to journal this buffer.
                 * However, the commit of this transaction is still
                 * important to the buffer: the delete that we are now
                 * processing might obsolete an old log entry, so by
                 * committing, we can satisfy the buffer's checkpoint.
                 *
                 * So, if we have a checkpoint on the buffer, we should
                 * now refile the buffer on our BJ_Forget list so that
                 * we know to remove the checkpoint after we commit.
                 */

                if (jh->b_cp_transaction) {
                        __jbd2_journal_temp_unlink_buffer(jh);
                        __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
                } else {
                        __jbd2_journal_unfile_buffer(jh);
                        if (!buffer_jbd(bh)) {
                                spin_unlock(&journal->j_list_lock);
                                jbd_unlock_bh_state(bh);
                                __bforget(bh);
                                goto drop;
                        }
                }
        } else if (jh->b_transaction) {
                J_ASSERT_JH(jh, (jh->b_transaction ==
                                 journal->j_committing_transaction));
                /* However, if the buffer is still owned by a prior
                 * (committing) transaction, we can't drop it yet... */
                JBUFFER_TRACE(jh, "belongs to older transaction");
                /* ... but we CAN drop it from the new transaction if we
                 * have also modified it since the original commit. */

                if (jh->b_next_transaction) {
                        J_ASSERT(jh->b_next_transaction == transaction);
                        jh->b_next_transaction = NULL;

                        /*
                         * only drop a reference if this transaction modified
                         * the buffer
                         */
                        if (was_modified)
                                drop_reserve = 1;
                }
        }

not_jbd:
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);
        __brelse(bh);
drop:
        if (drop_reserve) {
                /* no need to reserve log space for this block -bzzz */
                handle->h_buffer_credits++;
        }
        return err;
}
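
/*
 * Example (illustrative sketch; the delete path and names are
 * hypothetical): when a filesystem frees a metadata block it forgets the
 * buffer instead of dirtying it, so a stale copy is never written over
 * the freed block.
 *
 *    bh = sb_find_get_block(sb, blocknr);
 *    if (bh)
 *            err = jbd2_journal_forget(handle, bh);
 *
 * jbd2_journal_forget() consumes one buffer reference (it decrements
 * bh->b_count), so the reference taken above must not also be released
 * with brelse().
 */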

/**
 * int jbd2_journal_stop() - complete a transaction
1340  * @handle: transaction to complete.
1341  *
1342  * All done for a particular handle.
1343  *
1344  * There is not much action needed here.  We just return any remaining
1345  * buffer credits to the transaction and remove the handle.  The only
1346  * complication is that we need to start a commit operation if the
1347  * filesystem is marked for synchronous update.
1348  *
1349  * jbd2_journal_stop itself will not usually return an error, but it may
1350  * do so in unusual circumstances.  In particular, expect it to
1351  * return -EIO if a jbd2_journal_abort has been executed since the
1352  * transaction began.
1353  */
1354 int jbd2_journal_stop(handle_t *handle)
1355 {
1356         transaction_t *transaction = handle->h_transaction;
1357         journal_t *journal = transaction->t_journal;
1358         int err, wait_for_commit = 0;
1359         tid_t tid;
1360         pid_t pid;
1361
1362         J_ASSERT(journal_current_handle() == handle);
1363
1364         if (is_handle_aborted(handle))
1365                 err = -EIO;
1366         else {
1367                 J_ASSERT(atomic_read(&transaction->t_updates) > 0);
1368                 err = 0;
1369         }
1370
1371         if (--handle->h_ref > 0) {
1372                 jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
1373                           handle->h_ref);
1374                 return err;
1375         }
1376
1377         jbd_debug(4, "Handle %p going down\n", handle);
1378
1379         /*
1380          * Implement synchronous transaction batching.  If the handle
1381          * was synchronous, don't force a commit immediately.  Let's
1382          * yield and let another thread piggyback onto this
1383          * transaction.  Keep doing that while new threads continue to
1384          * arrive.  It doesn't cost much - we're about to run a commit
1385          * and sleep on IO anyway.  Speeds up many-threaded, many-dir
1386          * operations by 30x or more...
1387          *
1388          * We try to optimize the sleep time against what the
1389          * underlying disk can do, instead of having a static sleep
1390          * time.  This is useful for the case where our storage is so
1391          * fast that it is better to go ahead and force a flush and
1392          * wait for the transaction to be committed than it is to
1393          * wait for an arbitrary amount of time for new writers to
1394          * join the transaction.  We achieve this by measuring how
1395          * long it takes to commit a transaction, comparing it with
1396          * how long this transaction has been running; if the run time
1397          * is less than the commit time, we sleep for the delta and
1398          * commit.  This greatly helps super fast disks that would see
1399          * slowdowns as more threads start doing fsyncs.
1400          *
1401          * But don't do this if this process was the most recent one
1402          * to perform a synchronous write.  We do this to detect the
1403          * case where a single process is doing a stream of sync
1404          * writes.  No point in waiting for joiners in that case.
1405          */
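        /*
         * Worked example (hypothetical numbers, added for illustration):
         * if the clamped average commit time is 2ms and this transaction
         * has been running for only 0.5ms, run time < commit time, so the
         * code below sleeps (about one average commit time) to let other
         * synchronous writers join before forcing the commit.
         */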
1406         pid = current->pid;
1407         if (handle->h_sync && journal->j_last_sync_writer != pid) {
1408                 u64 commit_time, trans_time;
1409
1410                 journal->j_last_sync_writer = pid;
1411
1412                 read_lock(&journal->j_state_lock);
1413                 commit_time = journal->j_average_commit_time;
1414                 read_unlock(&journal->j_state_lock);
1415
1416                 trans_time = ktime_to_ns(ktime_sub(ktime_get(),
1417                                                    transaction->t_start_time));
1418
1419                 commit_time = max_t(u64, commit_time,
1420                                     1000*journal->j_min_batch_time);
1421                 commit_time = min_t(u64, commit_time,
1422                                     1000*journal->j_max_batch_time);
1423
1424                 if (trans_time < commit_time) {
1425                         ktime_t expires = ktime_add_ns(ktime_get(),
1426                                                        commit_time);
1427                         set_current_state(TASK_UNINTERRUPTIBLE);
1428                         schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
1429                 }
1430         }
1431
1432         if (handle->h_sync)
1433                 transaction->t_synchronous_commit = 1;
1434         current->journal_info = NULL;
1435         atomic_sub(handle->h_buffer_credits,
1436                    &transaction->t_outstanding_credits);
1437
1438         /*
1439          * If the handle is marked SYNC, we need to set another commit
1440          * going!  We also want to force a commit if the current
1441          * transaction is occupying too much of the log, or if the
1442          * transaction is too old now.
1443          */
1444         if (handle->h_sync ||
1445             (atomic_read(&transaction->t_outstanding_credits) >
1446              journal->j_max_transaction_buffers) ||
1447             time_after_eq(jiffies, transaction->t_expires)) {
1448                 /* Do this even for aborted journals: an abort still
1449                  * completes the commit thread, it just doesn't write
1450                  * anything to disk. */
1451
1452                 jbd_debug(2, "transaction too old, requesting commit for "
1453                                         "handle %p\n", handle);
1454                 /* This is non-blocking */
1455                 jbd2_log_start_commit(journal, transaction->t_tid);
1456
1457                 /*
1458                  * Special case: JBD2_SYNC synchronous updates require us
1459                  * to wait for the commit to complete.
1460                  */
1461                 if (handle->h_sync && !(current->flags & PF_MEMALLOC))
1462                         wait_for_commit = 1;
1463         }
1464
1465         /*
1466          * Once we drop t_updates, if it goes to zero the transaction
1467          * could start committing on us and eventually disappear.  So
1468          * once we do this, we must not dereference transaction
1469          * pointer again.
1470          */
1471         tid = transaction->t_tid;
1472         if (atomic_dec_and_test(&transaction->t_updates)) {
1473                 wake_up(&journal->j_wait_updates);
1474                 if (journal->j_barrier_count)
1475                         wake_up(&journal->j_wait_transaction_locked);
1476         }
1477
1478         if (wait_for_commit)
1479                 err = jbd2_log_wait_commit(journal, tid);
1480
1481         lock_map_release(&handle->h_lockdep_map);
1482
1483         jbd2_free_handle(handle);
1484         return err;
1485 }
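
/*
 * Editorial sketch (not part of the original file): the usual pairing of
 * jbd2_journal_start() with jbd2_journal_stop(), roughly as a filesystem
 * would use it.  Function and variable names below are illustrative.
 */
#if 0
static int example_modify_one_block(journal_t *journal, struct buffer_head *bh)
{
        handle_t *handle;
        int err, err2;

        /* Reserve credit for one metadata block in the running transaction. */
        handle = jbd2_journal_start(journal, 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        /* Declare our intent to modify the buffer... */
        err = jbd2_journal_get_write_access(handle, bh);
        if (!err) {
                /* ...change its contents here, then hand it to the journal
                 * as dirty metadata. */
                err = jbd2_journal_dirty_metadata(handle, bh);
        }

        /* Return unused credits and release the handle. */
        err2 = jbd2_journal_stop(handle);
        if (!err)
                err = err2;
        return err;
}
#endif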
1486
1487 /**
1488  * int jbd2_journal_force_commit() - force any uncommitted transactions
1489  * @journal: journal to force
1490  *
1491  * For synchronous operations: force any uncommitted transactions
1492  * to disk.  May seem kludgy, but it reuses all the handle batching
1493  * code in a very simple manner.
1494  */
1495 int jbd2_journal_force_commit(journal_t *journal)
1496 {
1497         handle_t *handle;
1498         int ret;
1499
1500         handle = jbd2_journal_start(journal, 1);
1501         if (IS_ERR(handle)) {
1502                 ret = PTR_ERR(handle);
1503         } else {
1504                 handle->h_sync = 1;
1505                 ret = jbd2_journal_stop(handle);
1506         }
1507         return ret;
1508 }
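
/*
 * Illustration (added editorially): a caller with no handle of its own,
 * such as a filesystem sync path, can flush everything with one call:
 *
 *      err = jbd2_journal_force_commit(journal);
 *
 * leaning on the synchronous-handle logic in jbd2_journal_stop() above.
 */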
1509
1510 /*
1511  *
1512  * List management code snippets: various functions for manipulating the
1513  * transaction buffer lists.
1514  *
1515  */
1516
1517 /*
1518  * Append a buffer to a transaction list, given the transaction's list head
1519  * pointer.
1520  *
1521  * j_list_lock is held.
1522  *
1523  * jbd_lock_bh_state(jh2bh(jh)) is held.
1524  */
1525
1526 static inline void
1527 __blist_add_buffer(struct journal_head **list, struct journal_head *jh)
1528 {
1529         if (!*list) {
1530                 jh->b_tnext = jh->b_tprev = jh;
1531                 *list = jh;
1532         } else {
1533                 /* Insert at the tail of the list to preserve order */
1534                 struct journal_head *first = *list, *last = first->b_tprev;
1535                 jh->b_tprev = last;
1536                 jh->b_tnext = first;
1537                 last->b_tnext = first->b_tprev = jh;
1538         }
1539 }
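
/*
 * Editorial illustration: b_tnext/b_tprev form a circular doubly-linked
 * list.  After adding A, B and C in that order, *list points at A and
 * the links are:
 *
 *      A.b_tnext = B,  B.b_tnext = C,  C.b_tnext = A
 *      A.b_tprev = C,  B.b_tprev = A,  C.b_tprev = B
 *
 * so each new buffer lands at the tail, just before *list.
 */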
1540
1541 /*
1542  * Remove a buffer from a transaction list, given the transaction's list
1543  * head pointer.
1544  *
1545  * Called with j_list_lock held, and the journal may not be locked.
1546  *
1547  * jbd_lock_bh_state(jh2bh(jh)) is held.
1548  */
1549
1550 static inline void
1551 __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
1552 {
1553         if (*list == jh) {
1554                 *list = jh->b_tnext;
1555                 if (*list == jh)
1556                         *list = NULL;
1557         }
1558         jh->b_tprev->b_tnext = jh->b_tnext;
1559         jh->b_tnext->b_tprev = jh->b_tprev;
1560 }
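
/*
 * Editorial note: deletion from the circular list above has two head
 * cases.  If jh is the head, the head first advances to jh->b_tnext;
 * if jh was the only element, that advance lands back on jh and the
 * list is set to NULL.  The final two assignments then splice jh's
 * neighbours together (harmlessly self-referential in the singleton
 * case).
 */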
1561
1562 /*
1563  * Remove a buffer from the appropriate transaction list.
1564  *
1565  * Note that this function can *change* the value of
1566  * bh->b_transaction->t_buffers, t_forget, t_iobuf_list, t_shadow_list,
1567  * t_log_list or t_reserved_list.  If the caller is holding onto a copy of one
1568  * of these pointers, it could go bad.  Generally the caller needs to re-read
1569  * the pointer from the transaction_t.
1570  *
1571  * Called under j_list_lock.
1572  */
1573 static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
1574 {
1575         struct journal_head **list = NULL;
1576         transaction_t *transaction;
1577         struct buffer_head *bh = jh2bh(jh);
1578
1579         J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
1580         transaction = jh->b_transaction;
1581         if (transaction)
1582                 assert_spin_locked(&transaction->t_journal->j_list_lock);
1583
1584         J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
1585         if (jh->b_jlist != BJ_None)
1586                 J_ASSERT_JH(jh, transaction != NULL);
1587
1588         switch (jh->b_jlist) {
1589         case BJ_None:
1590                 return;
1591         case BJ_Metadata:
1592                 transaction->t_nr_buffers--;
1593                 J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
1594                 list = &transaction->t_buffers;
1595                 break;
1596         case BJ_Forget:
1597                 list = &transaction->t_forget;
1598                 break;
1599         case BJ_IO:
1600                 list = &transaction->t_iobuf_list;
1601                 break;
1602         case BJ_Shadow:
1603                 list = &transaction->t_shadow_list;
1604                 break;
1605         case BJ_LogCtl:
1606                 list = &transaction->t_log_list;
1607                 break;
1608         case BJ_Reserved:
1609                 list = &transaction->t_reserved_list;
1610                 break;
1611         }
1612
1613         __blist_del_buffer(list, jh);
1614         jh->b_jlist = BJ_None;
1615         if (test_clear_buffer_jbddirty(bh))
1616                 mark_buffer_dirty(bh);  /* Expose it to the VM */
1617 }
1618
1619 /*
1620  * Remove buffer from all transactions.
1621  *
1622  * Called with bh_state lock and j_list_lock
1623  *
1624  * jh and bh may be already freed when this function returns.
1625  */
1626 static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
1627 {
1628         __jbd2_journal_temp_unlink_buffer(jh);
1629         jh->b_transaction = NULL;
1630         jbd2_journal_put_journal_head(jh);
1631 }
1632
1633 void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
1634 {
1635         struct buffer_head *bh = jh2bh(jh);
1636
1637         /* Get reference so that buffer cannot be freed before we unlock it */
1638         get_bh(bh);
1639         jbd_lock_bh_state(bh);
1640         spin_lock(&journal->j_list_lock);
1641         __jbd2_journal_unfile_buffer(jh);
1642         spin_unlock(&journal->j_list_lock);
1643         jbd_unlock_bh_state(bh);
1644         __brelse(bh);
1645 }
1646
1647 /*
1648  * Called from jbd2_journal_try_to_free_buffers().
1649  *
1650  * Called under jbd_lock_bh_state(bh)
1651  */
1652 static void
1653 __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
1654 {
1655         struct journal_head *jh;
1656
1657         jh = bh2jh(bh);
1658
1659         if (buffer_locked(bh) || buffer_dirty(bh))
1660                 goto out;
1661
1662         if (jh->b_next_transaction != NULL)
1663                 goto out;
1664
1665         spin_lock(&journal->j_list_lock);
1666         if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
1667                 /* written-back checkpointed metadata buffer */
1668                 JBUFFER_TRACE(jh, "remove from checkpoint list");
1669                 __jbd2_journal_remove_checkpoint(jh);
1670         }
1671         spin_unlock(&journal->j_list_lock);
1672 out:
1673         return;
1674 }
1675
1676 /**
1677  * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
1678  * @journal: journal for operation
1679  * @page: to try and free
1680  * @gfp_mask: we use the mask to detect how hard we should try to release
1681  * buffers. If __GFP_WAIT and __GFP_FS are set, we wait for the commit code
1682  * to release the buffers.
1683  *
1684  *
1685  * For all the buffers on this page,
1686  * if they are fully written out ordered data, move them onto BUF_CLEAN
1687  * so try_to_free_buffers() can reap them.
1688  *
1689  * This function returns non-zero if we wish try_to_free_buffers()
1690  * to be called. We do this if the page is releasable by try_to_free_buffers().
1691  * We also do it if the page has locked or dirty buffers and the caller wants
1692  * us to perform sync or async writeout.
1693  *
1694  * This complicates JBD locking somewhat.  We aren't protected by the
1695  * BKL here.  We wish to remove the buffer from its committing or
1696  * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
1697  *
1698  * This may *change* the value of transaction_t->t_datalist, so anyone
1699  * who looks at t_datalist needs to lock against this function.
1700  *
1701  * Even worse, someone may be doing a jbd2_journal_dirty_data on this
1702  * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
1703  * will come out of the lock with the buffer dirty, which makes it
1704  * ineligible for release here.
1705  *
1706  * Who else is affected by this?  hmm...  Really the only contender
1707  * is do_get_write_access() - it could be looking at the buffer while
1708  * journal_try_to_free_buffer() is changing its state.  But that
1709  * cannot happen because we never reallocate freed data as metadata
1710  * while the data is part of a transaction.  Yes?
1711  *
1712  * Return 0 on failure, 1 on success
1713  */
1714 int jbd2_journal_try_to_free_buffers(journal_t *journal,
1715                                 struct page *page, gfp_t gfp_mask)
1716 {
1717         struct buffer_head *head;
1718         struct buffer_head *bh;
1719         int ret = 0;
1720
1721         J_ASSERT(PageLocked(page));
1722
1723         head = page_buffers(page);
1724         bh = head;
1725         do {
1726                 struct journal_head *jh;
1727
1728                 /*
1729                  * We take our own ref against the journal_head here to avoid
1730                  * having to add tons of locking around each instance of
1731                  * jbd2_journal_put_journal_head().
1732                  */
1733                 jh = jbd2_journal_grab_journal_head(bh);
1734                 if (!jh)
1735                         continue;
1736
1737                 jbd_lock_bh_state(bh);
1738                 __journal_try_to_free_buffer(journal, bh);
1739                 jbd2_journal_put_journal_head(jh);
1740                 jbd_unlock_bh_state(bh);
1741                 if (buffer_jbd(bh))
1742                         goto busy;
1743         } while ((bh = bh->b_this_page) != head);
1744
1745         ret = try_to_free_buffers(page);
1746
1747 busy:
1748         return ret;
1749 }
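
/*
 * Editorial sketch: the natural shape of a filesystem's ->releasepage
 * address_space operation when jbd2 is in use.  The helper name
 * example_page_journal() is hypothetical; a real filesystem fetches the
 * journal from the page's inode.
 */
#if 0
static int example_releasepage(struct page *page, gfp_t gfp_mask)
{
        journal_t *journal = example_page_journal(page); /* hypothetical */

        if (!journal)
                return try_to_free_buffers(page);
        return jbd2_journal_try_to_free_buffers(journal, page, gfp_mask);
}
#endif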
1750
1751 /*
1752  * This buffer is no longer needed.  If it is on an older transaction's
1753  * checkpoint list we need to record it on this transaction's forget list
1754  * to pin this buffer (and hence its checkpointing transaction) down until
1755  * this transaction commits.  If the buffer isn't on a checkpoint list, we
1756  * release it.
1757  * Returns non-zero if JBD no longer has an interest in the buffer.
1758  *
1759  * Called under j_list_lock.
1760  *
1761  * Called under jbd_lock_bh_state(bh).
1762  */
1763 static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
1764 {
1765         int may_free = 1;
1766         struct buffer_head *bh = jh2bh(jh);
1767
1768         if (jh->b_cp_transaction) {
1769                 JBUFFER_TRACE(jh, "on running+cp transaction");
1770                 __jbd2_journal_temp_unlink_buffer(jh);
1771                 /*
1772                  * We don't want to write the buffer anymore, clear the
1773                  * bit so that we don't confuse checks in
1774                  * __journal_file_buffer
1775                  */
1776                 clear_buffer_dirty(bh);
1777                 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1778                 may_free = 0;
1779         } else {
1780                 JBUFFER_TRACE(jh, "on running transaction");
1781                 __jbd2_journal_unfile_buffer(jh);
1782         }
1783         return may_free;
1784 }
1785
1786 /*
1787  * jbd2_journal_invalidatepage
1788  *
1789  * This code is tricky.  It has a number of cases to deal with.
1790  *
1791  * There are two invariants which this code relies on:
1792  *
1793  * i_size must be updated on disk before we start calling invalidatepage on the
1794  * data.
1795  *
1796  *  This is done in ext3 by defining an ext3_setattr method which
1797  *  updates i_size before truncate gets going.  By maintaining this
1798  *  invariant, we can be sure that it is safe to throw away any buffers
1799  *  attached to the current transaction: once the transaction commits,
1800  *  we know that the data will not be needed.
1801  *
1802  *  Note however that we can *not* throw away data belonging to the
1803  *  previous, committing transaction!
1804  *
1805  * Any disk blocks which *are* part of the previous, committing
1806  * transaction (and which therefore cannot be discarded immediately) are
1807  * not going to be reused in the new running transaction
1808  *
1809  *  The bitmap committed_data images guarantee this: any block which is
1810  *  allocated in one transaction and removed in the next will be marked
1811  *  as in-use in the committed_data bitmap, so cannot be reused until
1812  *  the next transaction to delete the block commits.  This means that
1813  *  leaving committing buffers dirty is quite safe: the disk blocks
1814  *  cannot be reallocated to a different file and so buffer aliasing is
1815  *  not possible.
1816  *
1817  *
1818  * The above applies mainly to ordered data mode.  In writeback mode we
1819  * don't make guarantees about the order in which data hits disk --- in
1820  * particular we don't guarantee that new dirty data is flushed before
1821  * transaction commit --- so it is always safe just to discard data
1822  * immediately in that mode.  --sct
1823  */
1824
1825 /*
1826  * The journal_unmap_buffer helper function returns zero if the buffer
1827  * concerned remains pinned as an anonymous buffer belonging to an older
1828  * transaction.
1829  *
1830  * We're outside-transaction here.  Either or both of j_running_transaction
1831  * and j_committing_transaction may be NULL.
1832  */
1833 static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
1834                                 int partial_page)
1835 {
1836         transaction_t *transaction;
1837         struct journal_head *jh;
1838         int may_free = 1;
1839
1840         BUFFER_TRACE(bh, "entry");
1841
1842 retry:
1843         /*
1844          * It is safe to proceed here without the j_list_lock because the
1845          * buffers cannot be stolen by try_to_free_buffers as long as we are
1846          * holding the page lock. --sct
1847          */
1848
1849         if (!buffer_jbd(bh))
1850                 goto zap_buffer_unlocked;
1851
1852         /* OK, we have data buffer in journaled mode */
1853         write_lock(&journal->j_state_lock);
1854         jbd_lock_bh_state(bh);
1855         spin_lock(&journal->j_list_lock);
1856
1857         jh = jbd2_journal_grab_journal_head(bh);
1858         if (!jh)
1859                 goto zap_buffer_no_jh;
1860
1861         /*
1862          * We cannot remove the buffer from checkpoint lists until the
1863          * transaction adding the inode to the orphan list (call it T)
1864          * is committed.  Otherwise, if the transaction changing the
1865          * buffer were cleaned from the journal before T is committed,
1866          * a crash would cause the correct contents of the buffer to
1867          * be lost.  On the other hand, we have to clear the buffer's
1868          * dirty bit no later than the moment when the transaction
1869          * marking the buffer as freed in the filesystem structures
1870          * commits, because from that moment on the block can be
1871          * reallocated and used by a different page.  Since the block
1872          * hasn't been freed yet but the inode has already been added
1873          * to the orphan list, it is safe for us to add the buffer to
1874          * the BJ_Forget list of the newest transaction.
1875          *
1876          * Also we have to clear the buffer_mapped flag of a truncated
1877          * buffer because the buffer_head may be attached to a page
1878          * straddling i_size (only when blocksize < pagesize) and thus the
1879          * buffer_head can be reused when the file is extended again. So we end
1880          * up keeping around invalidated buffers attached to transactions'
1881          * BJ_Forget list just to stop checkpointing code from cleaning up
1882          * the transaction this buffer was modified in.
1883          */
1884         transaction = jh->b_transaction;
1885         if (transaction == NULL) {
1886                 /* First case: not on any transaction.  If it
1887                  * has no checkpoint link, then we can zap it:
1888                  * it's a writeback-mode buffer so we don't care
1889                  * if it hits disk safely. */
1890                 if (!jh->b_cp_transaction) {
1891                         JBUFFER_TRACE(jh, "not on any transaction: zap");
1892                         goto zap_buffer;
1893                 }
1894
1895                 if (!buffer_dirty(bh)) {
1896                         /* bdflush has written it.  We can drop it now */
1897                         goto zap_buffer;
1898                 }
1899
1900                 /* OK, it must be in the journal but still not
1901                  * written fully to disk: it's metadata or
1902                  * journaled data... */
1903
1904                 if (journal->j_running_transaction) {
1905                         /* ... and once the current transaction has
1906                          * committed, the buffer won't be needed any
1907                          * longer. */
1908                         JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
1909                         may_free = __dispose_buffer(jh,
1910                                         journal->j_running_transaction);
1911                         goto zap_buffer;
1912                 } else {
1913                         /* There is no currently-running transaction. So the
1914                          * orphan record which we wrote for this file must have
1915                          * passed into commit.  We must attach this buffer to
1916                          * the committing transaction, if it exists. */
1917                         if (journal->j_committing_transaction) {
1918                                 JBUFFER_TRACE(jh, "give to committing trans");
1919                                 may_free = __dispose_buffer(jh,
1920                                         journal->j_committing_transaction);
1921                                 goto zap_buffer;
1922                         } else {
1923                                 /* The orphan record's transaction has
1924                                  * committed.  We can cleanse this buffer */
1925                                 clear_buffer_jbddirty(bh);
1926                                 goto zap_buffer;
1927                         }
1928                 }
1929         } else if (transaction == journal->j_committing_transaction) {
1930                 JBUFFER_TRACE(jh, "on committing transaction");
1931                 /*
1932                  * The buffer is committing; we simply cannot touch
1933                  * it. If the page is straddling i_size, we have to wait
1934                  * for commit and try again.
1935                  */
1936                 if (partial_page) {
1937                         tid_t tid = journal->j_committing_transaction->t_tid;
1938
1939                         jbd2_journal_put_journal_head(jh);
1940                         spin_unlock(&journal->j_list_lock);
1941                         jbd_unlock_bh_state(bh);
1942                         write_unlock(&journal->j_state_lock);
1943                         jbd2_log_wait_commit(journal, tid);
1944                         goto retry;
1945                 }
1946                 /*
1947                  * OK, buffer won't be reachable after truncate. We just set
1948                  * j_next_transaction to the running transaction (if there is
1949                  * one) and mark buffer as freed so that commit code knows it
1950                  * should clear dirty bits when it is done with the buffer.
1951                  */
1952                 set_buffer_freed(bh);
1953                 if (journal->j_running_transaction && buffer_jbddirty(bh))
1954                         jh->b_next_transaction = journal->j_running_transaction;
1955                 jbd2_journal_put_journal_head(jh);
1956                 spin_unlock(&journal->j_list_lock);
1957                 jbd_unlock_bh_state(bh);
1958                 write_unlock(&journal->j_state_lock);
1959                 return 0;
1960         } else {
1961                 /* Good, the buffer belongs to the running transaction.
1962                  * We are writing our own transaction's data, not any
1963                  * previous one's, so it is safe to throw it away
1964                  * (remember that we expect the filesystem to have set
1965                  * i_size already for this truncate so recovery will not
1966                  * expose the disk blocks we are discarding here.) */
1967                 J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
1968                 JBUFFER_TRACE(jh, "on running transaction");
1969                 may_free = __dispose_buffer(jh, transaction);
1970         }
1971
1972 zap_buffer:
1973         /*
1974          * This is tricky. Although the buffer is truncated, it may be reused
1975          * if blocksize < pagesize and it is attached to the page straddling
1976          * EOF. Since the buffer might have been added to BJ_Forget list of the
1977          * running transaction, journal_get_write_access() won't clear
1978          * b_modified and credit accounting gets confused. So clear b_modified
1979          * here.
1980          */
1981         jh->b_modified = 0;
1982         jbd2_journal_put_journal_head(jh);
1983 zap_buffer_no_jh:
1984         spin_unlock(&journal->j_list_lock);
1985         jbd_unlock_bh_state(bh);
1986         write_unlock(&journal->j_state_lock);
1987 zap_buffer_unlocked:
1988         clear_buffer_dirty(bh);
1989         J_ASSERT_BH(bh, !buffer_jbddirty(bh));
1990         clear_buffer_mapped(bh);
1991         clear_buffer_req(bh);
1992         clear_buffer_new(bh);
1993         clear_buffer_delay(bh);
1994         clear_buffer_unwritten(bh);
1995         bh->b_bdev = NULL;
1996         return may_free;
1997 }
1998
1999 /**
2000  * void jbd2_journal_invalidatepage()
2001  * @journal: journal to use for flush...
2002  * @page:    page to flush
2003  * @offset:  start of the range to invalidate
2004  *
2005  * Reap page buffers containing data after offset in page.
2006  *
2007  */
2008 void jbd2_journal_invalidatepage(journal_t *journal,
2009                       struct page *page,
2010                       unsigned long offset)
2011 {
2012         struct buffer_head *head, *bh, *next;
2013         unsigned int curr_off = 0;
2014         int may_free = 1;
2015
2016         /* The caller must hold the page lock. */
2017         BUG_ON(!PageLocked(page));
2018         if (!page_has_buffers(page))
2019                 return;
2020
2021         /* We will potentially be playing with lists other than just the
2022          * data lists (especially for journaled data mode), so be
2023          * cautious in our locking. */
2024
2025         head = bh = page_buffers(page);
2026         do {
2027                 unsigned int next_off = curr_off + bh->b_size;
2028                 next = bh->b_this_page;
2029
2030                 if (offset <= curr_off) {
2031                         /* This block lies entirely beyond the truncation point */
2032                         lock_buffer(bh);
2033                         may_free &= journal_unmap_buffer(journal, bh,
2034                                                          offset > 0);
2035                         unlock_buffer(bh);
2036                 }
2037                 curr_off = next_off;
2038                 bh = next;
2039
2040         } while (bh != head);
2041
2042         if (!offset) {
2043                 if (may_free && try_to_free_buffers(page))
2044                         J_ASSERT(!page_has_buffers(page));
2045         }
2046 }
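
/*
 * Editorial sketch: a journaling filesystem's ->invalidatepage hook is
 * little more than a pass-through to the helper above.  Again,
 * example_page_journal() is a hypothetical helper.
 */
#if 0
static void example_invalidatepage(struct page *page, unsigned long offset)
{
        journal_t *journal = example_page_journal(page); /* hypothetical */

        jbd2_journal_invalidatepage(journal, page, offset);
}
#endif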
2047
2048 /*
2049  * File a buffer on the given transaction list.
2050  */
2051 void __jbd2_journal_file_buffer(struct journal_head *jh,
2052                         transaction_t *transaction, int jlist)
2053 {
2054         struct journal_head **list = NULL;
2055         int was_dirty = 0;
2056         struct buffer_head *bh = jh2bh(jh);
2057
2058         J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
2059         assert_spin_locked(&transaction->t_journal->j_list_lock);
2060
2061         J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
2062         J_ASSERT_JH(jh, jh->b_transaction == transaction ||
2063                                 jh->b_transaction == NULL);
2064
2065         if (jh->b_transaction && jh->b_jlist == jlist)
2066                 return;
2067
2068         if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
2069             jlist == BJ_Shadow || jlist == BJ_Forget) {
2070                 /*
2071                  * For metadata buffers, we track dirty bit in buffer_jbddirty
2072                  * instead of buffer_dirty. We should not see a dirty bit set
2073                  * here because we clear it in do_get_write_access but e.g.
2074                  * tune2fs can modify the sb and set the dirty bit at any time
2075                  * so we try to gracefully handle that.
2076                  */
2077                 if (buffer_dirty(bh))
2078                         warn_dirty_buffer(bh);
2079                 if (test_clear_buffer_dirty(bh) ||
2080                     test_clear_buffer_jbddirty(bh))
2081                         was_dirty = 1;
2082         }
2083
2084         if (jh->b_transaction)
2085                 __jbd2_journal_temp_unlink_buffer(jh);
2086         else
2087                 jbd2_journal_grab_journal_head(bh);
2088         jh->b_transaction = transaction;
2089
2090         switch (jlist) {
2091         case BJ_None:
2092                 J_ASSERT_JH(jh, !jh->b_committed_data);
2093                 J_ASSERT_JH(jh, !jh->b_frozen_data);
2094                 return;
2095         case BJ_Metadata:
2096                 transaction->t_nr_buffers++;
2097                 list = &transaction->t_buffers;
2098                 break;
2099         case BJ_Forget:
2100                 list = &transaction->t_forget;
2101                 break;
2102         case BJ_IO:
2103                 list = &transaction->t_iobuf_list;
2104                 break;
2105         case BJ_Shadow:
2106                 list = &transaction->t_shadow_list;
2107                 break;
2108         case BJ_LogCtl:
2109                 list = &transaction->t_log_list;
2110                 break;
2111         case BJ_Reserved:
2112                 list = &transaction->t_reserved_list;
2113                 break;
2114         }
2115
2116         __blist_add_buffer(list, jh);
2117         jh->b_jlist = jlist;
2118
2119         if (was_dirty)
2120                 set_buffer_jbddirty(bh);
2121 }
2122
2123 void jbd2_journal_file_buffer(struct journal_head *jh,
2124                                 transaction_t *transaction, int jlist)
2125 {
2126         jbd_lock_bh_state(jh2bh(jh));
2127         spin_lock(&transaction->t_journal->j_list_lock);
2128         __jbd2_journal_file_buffer(jh, transaction, jlist);
2129         spin_unlock(&transaction->t_journal->j_list_lock);
2130         jbd_unlock_bh_state(jh2bh(jh));
2131 }
2132
2133 /*
2134  * Remove a buffer from its current buffer list in preparation for
2135  * dropping it from its current transaction entirely.  If the buffer has
2136  * already started to be used by a subsequent transaction, refile the
2137  * buffer on that transaction's metadata list.
2138  *
2139  * Called under j_list_lock
2140  * Called under jbd_lock_bh_state(jh2bh(jh))
2141  *
2142  * jh and bh may be already freed when this function returns.
2143  */
2144 void __jbd2_journal_refile_buffer(struct journal_head *jh)
2145 {
2146         int was_dirty, jlist;
2147         struct buffer_head *bh = jh2bh(jh);
2148
2149         J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
2150         if (jh->b_transaction)
2151                 assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);
2152
2153         /* If the buffer is now unused, just drop it. */
2154         if (jh->b_next_transaction == NULL) {
2155                 __jbd2_journal_unfile_buffer(jh);
2156                 return;
2157         }
2158
2159         /*
2160          * It has been modified by a later transaction: add it to the new
2161          * transaction's metadata list.
2162          */
2163
2164         was_dirty = test_clear_buffer_jbddirty(bh);
2165         __jbd2_journal_temp_unlink_buffer(jh);
2166         /*
2167          * We set b_transaction here because b_next_transaction will inherit
2168          * our jh reference and thus __jbd2_journal_file_buffer() must not
2169          * take a new one.
2170          */
2171         jh->b_transaction = jh->b_next_transaction;
2172         jh->b_next_transaction = NULL;
2173         if (buffer_freed(bh))
2174                 jlist = BJ_Forget;
2175         else if (jh->b_modified)
2176                 jlist = BJ_Metadata;
2177         else
2178                 jlist = BJ_Reserved;
2179         __jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
2180         J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
2181
2182         if (was_dirty)
2183                 set_buffer_jbddirty(bh);
2184 }
2185
2186 /*
2187  * __jbd2_journal_refile_buffer() with necessary locking added. We take our
2188  * bh reference so that we can safely unlock bh.
2189  *
2190  * The jh and bh may be freed by this call.
2191  */
2192 void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
2193 {
2194         struct buffer_head *bh = jh2bh(jh);
2195
2196         /* Get reference so that buffer cannot be freed before we unlock it */
2197         get_bh(bh);
2198         jbd_lock_bh_state(bh);
2199         spin_lock(&journal->j_list_lock);
2200         __jbd2_journal_refile_buffer(jh);
2201         jbd_unlock_bh_state(bh);
2202         spin_unlock(&journal->j_list_lock);
2203         __brelse(bh);
2204 }
2205
2206 /*
2207  * File inode in the inode list of the handle's transaction
2208  */
2209 int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
2210 {
2211         transaction_t *transaction = handle->h_transaction;
2212         journal_t *journal = transaction->t_journal;
2213
2214         if (is_handle_aborted(handle))
2215                 return -EIO;
2216
2217         jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
2218                         transaction->t_tid);
2219
2220         /*
2221          * First check, without taking the lock, whether the inode is
2222          * already on the transaction's lists. The check is safe without
2223          * the lock as we cannot race with somebody removing the inode
2224          * from the transaction. The reason is that we remove the inode from the
2225          * transaction only in journal_release_jbd_inode() and when we commit
2226          * the transaction. We are guarded from the first case by holding
2227          * a reference to the inode. We are safe against the second case
2228          * because if jinode->i_transaction == transaction, commit code
2229          * cannot touch the transaction because we hold reference to it,
2230          * and if jinode->i_next_transaction == transaction, commit code
2231          * will only file the inode where we want it.
2232          */
2233         if (jinode->i_transaction == transaction ||
2234             jinode->i_next_transaction == transaction)
2235                 return 0;
2236
2237         spin_lock(&journal->j_list_lock);
2238
2239         if (jinode->i_transaction == transaction ||
2240             jinode->i_next_transaction == transaction)
2241                 goto done;
2242
2243         /*
2244          * We only ever set this variable to 1 so the test is safe. Since
2245          * t_need_data_flush is likely to be set, we do the test to save some
2246  * cacheline bouncing.
2247          */
2248         if (!transaction->t_need_data_flush)
2249                 transaction->t_need_data_flush = 1;
2250         /* On some different transaction's list - should be
2251          * the committing one */
2252         if (jinode->i_transaction) {
2253                 J_ASSERT(jinode->i_next_transaction == NULL);
2254                 J_ASSERT(jinode->i_transaction ==
2255                                         journal->j_committing_transaction);
2256                 jinode->i_next_transaction = transaction;
2257                 goto done;
2258         }
2259         /* Not on any transaction list... */
2260         J_ASSERT(!jinode->i_next_transaction);
2261         jinode->i_transaction = transaction;
2262         list_add(&jinode->i_list, &transaction->t_inode_list);
2263 done:
2264         spin_unlock(&journal->j_list_lock);
2265
2266         return 0;
2267 }
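
/*
 * Editorial note: the unlocked check followed by the same check under
 * j_list_lock above is a double-checked fast path; in the common case
 * (inode already filed on this transaction) no lock is taken at all.
 * An ordered-mode filesystem typically calls this from its write path,
 * roughly (illustrative only):
 *
 *      err = jbd2_journal_file_inode(handle, jinode);
 */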
2268
2269 /*
2270  * File truncate and transaction commit interact with each other in a
2271  * non-trivial way.  If a transaction writing data block A is
2272  * committing, we cannot discard the data by truncate until we have
2273  * written them.  Otherwise if we crashed after the transaction with
2274  * write has committed but before the transaction with truncate has
2275  * committed, we could see stale data in block A.  This function is a
2276  * helper to solve this problem.  It starts writeout of the truncated
2277  * part in case it is in the committing transaction.
2278  *
2279  * Filesystem code must call this function when the inode is journaled
2280  * in ordered mode, before truncation happens and after the inode has
2281  * been placed on the orphan list with the new inode size. The second
2282  * condition avoids the race where someone writes new data and we start
2283  * committing the transaction after this function has been called but
2284  * before a transaction for the truncate is started (and furthermore it
2285  * allows us to optimize the case where the addition to the orphan list
2286  * happens in the same transaction as the write --- we don't have to
2287  * write any data in such a case).
2288  */
2289 int jbd2_journal_begin_ordered_truncate(journal_t *journal,
2290                                         struct jbd2_inode *jinode,
2291                                         loff_t new_size)
2292 {
2293         transaction_t *inode_trans, *commit_trans;
2294         int ret = 0;
2295
2296         /* This is a quick check to avoid locking if not necessary */
2297         if (!jinode->i_transaction)
2298                 goto out;
2299         /* The locks here just force reading of recent values; it is
2300          * enough that the transaction was not committing before we
2301          * started the transaction that adds the inode to the orphan list. */
2302         read_lock(&journal->j_state_lock);
2303         commit_trans = journal->j_committing_transaction;
2304         read_unlock(&journal->j_state_lock);
2305         spin_lock(&journal->j_list_lock);
2306         inode_trans = jinode->i_transaction;
2307         spin_unlock(&journal->j_list_lock);
2308         if (inode_trans == commit_trans) {
2309                 ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
2310                         new_size, LLONG_MAX);
2311                 if (ret)
2312                         jbd2_journal_abort(journal, ret);
2313         }
2314 out:
2315         return ret;
2316 }
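
/*
 * Editorial sketch: the expected call site in a filesystem's truncate
 * path, after the inode has been put on the orphan list with its new
 * size but before the truncate transaction is started.  Both helper
 * names below are hypothetical.
 */
#if 0
static int example_begin_truncate(struct inode *inode, loff_t new_size)
{
        journal_t *journal = example_inode_journal(inode);      /* hypothetical */
        struct jbd2_inode *jinode = example_jbd2_inode(inode);  /* hypothetical */

        return jbd2_journal_begin_ordered_truncate(journal, jinode, new_size);
}
#endif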