 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_log_priv.h"
#include "xfs_buf_item.h"
#include "xfs_log_recover.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_icreate_item.h"
/* Need all the magic numbers and buffer ops structures from these headers */
#include "xfs_symlink.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
#include "xfs_attr_leaf.h"
#include "xfs_attr_remote.h"
xlog_clear_stale_blocks(
xlog_recover_check_summary(
#define xlog_recover_check_summary(log)
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
struct xfs_buf_cancel {
	struct list_head	bc_list;
 * Sector aligned buffer routines for buffer create/read/write/access
 * Verify that the given count of basic blocks is a valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
xlog_buf_bbcount_valid(
	return bbcount > 0 && bbcount <= log->l_logBBsize;
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);
	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
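	/*
	 * Worked example of the sizing above (illustrative only; a 4k
	 * sector log device is assumed, so l_sectBBsize = 8): a request
	 * for nbblks = 5 is first extended to 13 to cover a non-sector-
	 * aligned start, then rounded up to 16 basic blocks, i.e. two
	 * full log sectors.  A single block request skips the extra
	 * sector and just rounds 1 up to 8.
	 */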
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
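/*
 * Example of the alignment math above (illustrative, assuming
 * l_sectBBsize = 8): a read of blk_no = 21 is rounded down to basic
 * block 16 before the I/O, so block 21's data sits at sector offset
 * 21 & 7 = 5 within the buffer, i.e. at b_addr + BBTOB(5).
 */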
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);
	ASSERT(nbblks <= bp->b_length);
	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	bp->b_io_length = nbblks;
	xfsbdstrat(log->l_mp, bp);
	error = xfs_buf_iowait(bp);
		xfs_buf_ioerror_alert(bp, __func__);
	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	*offset = xlog_align(log, blk_no, nbblks, bp);
 * Read at an offset into the buffer.  Returns with the buffer in its original
 * state regardless of the result of the read.
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	xfs_caddr_t	orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);
	ASSERT(nbblks <= bp->b_length);
	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_ZEROFLAGS(bp);
	bp->b_io_length = nbblks;
	error = xfs_bwrite(bp);
		xfs_buf_ioerror_alert(bp, __func__);
 * dump debug superblock and log record information
xlog_header_check_dump(
	xlog_rec_header_t	*head)
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d\n",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d\n",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
#define xlog_header_check_dump(mp, head)
 * check log record header for recovery
xlog_header_check_recover(
	xlog_rec_header_t	*head)
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN).  This stops us from trying to recover
	 * a dirty log created in IRIX.
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
			"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
			"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
 * read the head block of the log and check the header
xlog_header_check_mount(
	xlog_rec_header_t	*head)
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
	if (uuid_is_nil(&head->h_fs_uuid)) {
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields.  If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return XFS_ERROR(EFSCORRUPTED);
		 * We're not going to bother about retrying
		 * this during recovery.  One strike!
		xfs_buf_ioerror_alert(bp, __func__);
		xfs_force_shutdown(bp->b_target->bt_mount,
					SHUTDOWN_META_IO_ERROR);
	xfs_buf_ioend(bp, 0);
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
xlog_find_cycle_start(
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));
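/*
 * The loop above maintains the invariant that first_blk never holds the
 * target cycle and end_blk always does, so the interval shrinks until
 * the two blocks are adjacent.  A minimal userspace sketch of the same
 * search over an in-memory array of cycle numbers (illustrative only,
 * not built as part of recovery; find_first_with_cycle() is
 * hypothetical):
 */
#if 0
static int
find_first_with_cycle(uint *cycles, int first, int end, uint cycle)
{
	/* precondition: cycles[first] != cycle && cycles[end] == cycle */
	while (end - first > 1) {
		int mid = (first + end) / 2;	/* BLK_AVG(first, end) */

		if (cycles[mid] == cycle)
			end = mid;	/* last_half_cycle == mid_cycle */
		else
			first = mid;	/* first_half_cycle == mid_cycle */
	}
	return end;		/* first index stamped with @cycle */
}
#endif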
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
xlog_find_verify_cycle(
	xfs_daddr_t	start_blk,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
	xfs_caddr_t	buf = NULL;
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		bcount = min(bufblks, (start_blk + nbblks - i));
		error = xlog_bread(log, i, bcount, bp, &buf);
		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
 * Potentially backup over partial log record write.
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
xlog_find_verify_log_record(
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	xfs_caddr_t		offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			num_blks = *last_blk - start_blk;
	ASSERT(start_blk != 0 || *last_blk != start_blk);
	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
	error = xlog_bread(log, start_blk, num_blks, bp, &offset);
	offset += ((num_blks - 1) << BBSHIFT);
	for (i = (*last_blk) - 1; i >= 0; i--) {
			/* valid log record not found */
				"Log inconsistent (didn't find previous header)");
			error = XFS_ERROR(EIO);
		error = xlog_bread(log, i, 1, bp, &offset);
		head = (xlog_rec_header_t *)offset;
		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
 * We hit the beginning of the physical log & still no header.  Return
 * to caller.  If caller can handle a return of -1, then this routine
 * will be called again for the end of the physical log.
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head).  So we check the uuid.
	if ((error = xlog_header_check_mount(log->l_mp, head)))
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);
		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
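/*
 * Worked example of the v2 header math above (illustrative):
 * XLOG_HEADER_CYCLE_SIZE is 32k, so a record with h_size = 64k carries
 * 64k / 32k = 2 header blocks and no remainder, giving xhdrs = 2;
 * with h_size = 40k the division gives 1 with a remainder, so the
 * round-up also yields xhdrs = 2.
 */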
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LR writes have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 * last_blk contains the block number of the first block with a given
 * Return: zero if normal, non-zero if error.
	xfs_daddr_t	*return_head_blk)
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	uint		first_half_cycle, last_half_cycle;
	int		error, log_bbnum = log->l_logBBsize;
	/* Is the end of the log device zeroed? */
	if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
		*return_head_blk = first_blk;
		/* Is the whole lot zeroed? */
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		xfs_warn(log->l_mp, "empty log check failed");
	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	error = xlog_bread(log, 0, 1, bp, &offset);
	first_half_cycle = xlog_get_cycle(offset);
	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	if (first_half_cycle == last_half_cycle) {
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 *             v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		 * We are guaranteed that the entire check can be performed
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
	} else {		/* need to read 2 parts of log */
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                               ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
		/* start ptr at last block ptr before head_blk */
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			error = XFS_ERROR(EIO);
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_log_record(log, start_blk,
							&head_blk, 0)) == -1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			if ((error = xlog_find_verify_log_record(log,
							(int)head_blk)) == -1) {
				error = XFS_ERROR(EIO);
	if (new_blk != log_bbnum)
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	xfs_warn(log->l_mp, "failed to find log head");
 * Find the sync block number or the tail of the log.
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
	xfs_daddr_t	*head_blk,
	xfs_daddr_t	*tail_blk)
	xlog_rec_header_t	*rhead;
	xlog_op_header_t	*op_head;
	xfs_caddr_t		offset = NULL;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	 * Find previous log record
	if ((error = xlog_find_head(log, head_blk)))
	bp = xlog_get_bp(log, 1);
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (xlog_get_cycle(offset) == 0) {
			/* leave all other log inited values alone */
	 * Search backwards looking for log record header block
	ASSERT(*head_blk < INT_MAX);
	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
	 * If we haven't found the log record header block, start looking
	 * again from the end of the physical log.  XXXmiken: There should be
	 * a check here to make sure we didn't search more than N blocks in
	 * the previous code.
	for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (*(__be32 *)offset ==
		    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
	xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
	return XFS_ERROR(EIO);
	/* find blk_no of tail of log */
	rhead = (xlog_rec_header_t *)offset;
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	log->l_prev_block = i;
	log->l_curr_block = (int)*head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
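	/*
	 * Illustrative note: an LSN packs the cycle into the high 32
	 * bits and the basic block number into the low 32 bits, so the
	 * assignments above are, schematically:
	 *
	 *	lsn = xlog_assign_lsn(cycle, block);
	 *	ASSERT(CYCLE_LSN(lsn) == cycle);	high word
	 *	ASSERT(BLOCK_LSN(lsn) == block);	low word
	 */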
	 * Look for unmount record.  If we find it, then we know there
	 * was a clean unmount.  Since 'i' could be the last block in
	 * the physical log, we convert to a log block before comparing
	 * to the head_blk.
	 * Save the current tail lsn to use to pass to
	 * xlog_clear_stale_blocks() below.  We won't want to clear the
	 * unmount record if there is one, so we pass the lsn of the
	 * unmount record rather than the block after it.
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);
		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
	after_umount_blk = (i + hblks + (int)
		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
	tail_lsn = atomic64_read(&log->l_tail_lsn);
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = (i + hblks) % log->l_logBBsize;
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		op_head = (xlog_op_header_t *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			 * Set tail and last sync so that newly written
			 * log records will point recovery to after the
			 * current unmount record.
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;
			 * Note that the unmount was clean. If the unmount
			 * was not clean, we need to know this to rebuild the
			 * superblock counters from the perag headers if we
			 * have a filesystem using non-persistent counters.
			log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 * Do this only if we are going to recover the filesystem
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
		error = xlog_clear_stale_blocks(log, tail_lsn);
	xfs_warn(log->l_mp, "failed to locate log tail");
 * Is the log zeroed at all?
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 *	0  => the log is completely written to
 *	-1 => use *blk_no as the first block of the log
 *	>0 => error has occurred
	xfs_daddr_t	*blk_no)
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;
	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	error = xlog_bread(log, 0, 1, bp, &offset);
	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
	} else if (first_cycle != 1) {
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out.
			"Log inconsistent or not a log (last==0, first!=1)");
		return XFS_ERROR(EINVAL);
	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.   XXXmiken
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);
	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *               ^ binary search ends here
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	if ((error = xlog_find_verify_log_record(log, start_blk,
				&last_blk, 0)) == -1) {
		error = XFS_ERROR(EIO);
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;
	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
xlog_write_log_records(
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		j = start_block - balign;
	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;
		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;
		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
		error = xlog_bwrite(log, start_block, endcount, bp);
		start_block += endcount;
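		/*
		 * Boundary example for the reads above (illustrative,
		 * assuming 4k sectors, so sectbb = 8): writing blocks
		 * 5..10 rounds balign down to 0, so one block is read
		 * back first to preserve basic blocks 0-4 of the
		 * leading sector (j = 5), and the tail of the final
		 * sector is read in the same way before the stamped
		 * records are written back out.
		 */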
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
xlog_clear_stale_blocks(
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	if (head_cycle == tail_cycle) {
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		tail_distance = tail_block + (log->l_logBBsize - head_block);
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					XFS_ERRLEVEL_LOW, log->l_mp);
			return XFS_ERROR(EFSCORRUPTED);
		tail_distance = tail_block - head_block;
	 * If the head is right up against the tail, we can't clear
	 * out the space.
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	max_distance = MIN(max_distance, tail_distance);
	if ((head_block + max_distance) <= log->l_logBBsize) {
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
	} else {
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
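		/*
		 * Worked example of the wrap case (illustrative
		 * numbers): with l_logBBsize = 1000, head_block = 900
		 * and max_distance = 300, the first call stamps blocks
		 * 900..999 with cycle n - 1 and this second call stamps
		 * blocks 0..199 with cycle n, matching the picture
		 * above.
		 */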
/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */
STATIC xlog_recover_t *
xlog_recover_find_tid(
	struct hlist_head	*head,
	xlog_recover_t		*trans;
	hlist_for_each_entry(trans, head, r_list) {
		if (trans->r_log_tid == tid)
xlog_recover_new_tid(
	struct hlist_head	*head,
	xlog_recover_t		*trans;
	trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
	trans->r_log_tid   = tid;
	INIT_LIST_HEAD(&trans->r_itemq);
	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, head);
xlog_recover_add_item(
	struct list_head	*head)
	xlog_recover_item_t	*item;
	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
xlog_recover_add_to_cont_trans(
	struct xlog_recover	*trans,
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr, old_ptr;
	if (list_empty(&trans->r_itemq)) {
		/* finish copying rest of trans header */
		xlog_recover_add_item(&trans->r_itemq);
		ptr = (xfs_caddr_t) &trans->r_theader +
				sizeof(xfs_trans_header_t) - len;
		memcpy(ptr, dp, len);	/* dest, src, length */
	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;
	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
	memcpy(&ptr[old_len], dp, len);	/* dest, src, length */
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
 * The next region to add is the start of a new region.  It could be
 * a whole region or it could be the first part of a new region.  Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 * This works because all regions must be 32 bit aligned.  Therefore, we
 * either have both fields or we have neither field.  In the case we have
 * neither field, the data part of the region is zero length.  We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later.  If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
xlog_recover_add_to_trans(
	struct xlog_recover	*trans,
	xfs_inode_log_format_t	*in_f;			/* any will do */
	xlog_recover_item_t	*item;
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
			return XFS_ERROR(EIO);
		if (len == sizeof(xfs_trans_header_t))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len);	/* dest, src, length */
	ptr = kmem_alloc(len, KM_SLEEP);
	memcpy(ptr, dp, len);
	in_f = (xfs_inode_log_format_t *)ptr;
	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	if (item->ri_total != 0 &&
	     item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					xlog_recover_item_t, ri_list);
	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			"bad number of regions (%d) in inode log format",
			return XFS_ERROR(EIO);
		item->ri_total = in_f->ilf_size;
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
	ASSERT(item->ri_total > item->ri_cnt);
	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len  = len;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
 * Sort the log items in the transaction.
 * The ordering constraints are defined by the inode allocation and unlink
 * behaviour. The rules are:
 *	1. Every item is only logged once in a given transaction. Hence it
 *	   represents the last logged state of the item. Hence ordering is
 *	   dependent on the order in which operations need to be performed so
 *	   required initial conditions are always met.
 *	2. Cancelled buffers are recorded in pass 1 in a separate table and
 *	   there's nothing to replay from them so we can simply cull them
 *	   from the transaction. However, we can't do that until after we've
 *	   replayed all the other items because they may be dependent on the
 *	   cancelled buffer and replaying the cancelled buffer can remove it
 *	   from the cancelled buffer table. Hence they have to be done last.
 *	3. Inode allocation buffers must be replayed before inode items that
 *	   read the buffer and replay changes into it. For filesystems using the
 *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
 *	   treated the same as inode allocation buffers as they create and
 *	   initialise the buffers directly.
 *	4. Inode unlink buffers must be replayed after inode items are replayed.
 *	   This ensures that inodes are completely flushed to the inode buffer
 *	   in a "free" state before we remove the unlinked inode list pointer.
 * Hence the ordering needs to be inode allocation buffers first, inode items
 * second, inode unlink buffers third and cancelled buffers last.
 * But there's a problem with that - we can't tell an inode allocation buffer
 * apart from a regular buffer, so we can't separate them. We can, however,
 * tell an inode unlink buffer from the others, and so we can separate them out
 * from all the other buffers and move them to last.
 * Hence, 4 lists, in order from head to tail:
 *	- buffer_list for all buffers except cancelled/inode unlink buffers
 *	- item_list for all non-buffer items
 *	- inode_buffer_list for inode unlink buffers
 *	- cancel_list for the cancelled buffers
 * Note that we add objects to the tail of the lists so that first-to-last
 * ordering is preserved within the lists. Adding objects to the head of the
 * list means when we traverse from the head we walk them in last-to-first
 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
 * but for all other items there may be specific ordering that we need to
 * preserve.
xlog_recover_reorder_trans(
	struct xlog_recover	*trans,
	xlog_recover_item_t	*item, *n;
	LIST_HEAD(sort_list);
	LIST_HEAD(cancel_list);
	LIST_HEAD(buffer_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(inode_list);
	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
		switch (ITEM_TYPE(item)) {
		case XFS_LI_ICREATE:
			list_move_tail(&item->ri_list, &buffer_list);
		case XFS_LI_BUF:
			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
				trace_xfs_log_recover_item_reorder_head(log,
							trans, item, 0);
				list_move(&item->ri_list, &cancel_list);
			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
				list_move(&item->ri_list, &inode_buffer_list);
			list_move_tail(&item->ri_list, &buffer_list);
		case XFS_LI_QUOTAOFF:
			trace_xfs_log_recover_item_reorder_tail(log,
							trans, item, 0);
			list_move_tail(&item->ri_list, &inode_list);
			"%s: unrecognized type of log operation",
			return XFS_ERROR(EIO);
	ASSERT(list_empty(&sort_list));
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&inode_list))
		list_splice_tail(&inode_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
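	/*
	 * Illustrative result of the splices above: a transaction
	 * containing one item of each class would now replay in the
	 * order regular/inode-allocation buffers (including icreate
	 * items) first, then inodes and other non-buffer items, then
	 * inode unlink buffers, with cancelled buffers last.
	 */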
 * Build up the table of buf cancel records so that we don't replay
 * cancelled data in the second pass.  For buffer records that are
 * not cancel records, there is nothing to do here so we just return.
 * If we get a cancel record which is already in the table, this indicates
 * that the buffer was cancelled multiple times.  In order to ensure
 * that during pass 2 we keep the record in the table until we reach its
 * last occurrence in the log, we keep a reference count in the cancel
 * record in the table to tell us how many times we expect to see this
 * record during the second pass.
xlog_recover_buffer_pass1(
	struct xlog_recover_item	*item)
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;
	 * If this isn't a cancel buffer item, then just return.
	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
	 * Insert an xfs_buf_cancel record into the hash table of them.
	 * If there is already an identical record, bump its reference count.
	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == buf_f->blf_blkno &&
		    bcp->bc_len == buf_f->blf_len) {
			bcp->bc_refcount++;
			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
	bcp->bc_blkno = buf_f->blf_blkno;
	bcp->bc_len = buf_f->blf_len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, bucket);
	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
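	/*
	 * Illustrative use of the table built here: a buffer cancelled
	 * twice in the log gets a single xfs_buf_cancel entry with
	 * bc_refcount = 2.  Pass 2 decrements the count once per cancel
	 * item and frees the entry when it hits zero, so any reuse of
	 * the same blocks after the final cancellation replays normally.
	 */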
 * Check to see whether the buffer being recovered has a corresponding
 * entry in the buffer cancel record table. If it does then return 1
 * so that it will be cancelled, otherwise return 0. If the buffer is
 * actually a buffer cancel item (XFS_BLF_CANCEL is set), then decrement
 * the refcount on the entry in the table and remove it from the table
 * if this is the last reference.
 * We remove the cancel record from the table when we encounter its
 * last occurrence in the log so that if the same buffer is re-used
 * again after its last cancellation we actually replay the changes
 * made at that point.
xlog_check_buffer_cancelled(
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;
	if (log->l_buf_cancel_table == NULL) {
		 * There is nothing in the table built in pass one,
		 * so this buffer must not be cancelled.
		ASSERT(!(flags & XFS_BLF_CANCEL));
	 * Search for an entry in the cancel table that matches our buffer.
	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
	 * We didn't find a corresponding entry in the table, so return 0 so
	 * that the buffer is NOT cancelled.
	ASSERT(!(flags & XFS_BLF_CANCEL));
	 * We've got a match, so return 1 so that the recovery of this buffer
	 * is cancelled.  If this buffer is actually a buffer cancel log
	 * item, then decrement the refcount on the one in the table and
	 * remove it if this is the last reference.
	if (flags & XFS_BLF_CANCEL) {
		if (--bcp->bc_refcount == 0) {
			list_del(&bcp->bc_list);
 * Perform recovery for a buffer full of inodes.  In these buffers, the only
 * data which should be recovered is that which corresponds to the
 * di_next_unlinked pointers in the on disk inode structures.  The rest of the
 * data for the inodes is always logged through the inodes themselves rather
 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
 * The only time when buffers full of inodes are fully recovered is when the
 * buffer is full of newly allocated inodes.  In this case the buffer will
 * not be marked as an inode buffer and so will be sent to
 * xlog_recover_do_reg_buffer() below during recovery.
xlog_recover_do_inode_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	xfs_buf_log_format_t	*buf_f)
	int			reg_buf_offset = 0;
	int			reg_buf_bytes = 0;
	int			next_unlinked_offset;
	xfs_agino_t		*logged_nextp;
	xfs_agino_t		*buffer_nextp;
	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
	 * Post recovery validation only works properly on CRC enabled
	 * filesystems.
	if (xfs_sb_version_hascrc(&mp->m_sb))
		bp->b_ops = &xfs_inode_buf_ops;
	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
	for (i = 0; i < inodes_per_buf; i++) {
		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
			offsetof(xfs_dinode_t, di_next_unlinked);
		while (next_unlinked_offset >=
		       (reg_buf_offset + reg_buf_bytes)) {
			 * The next di_next_unlinked field is beyond
			 * the current logged region.  Find the next
			 * logged region that contains or is beyond
			 * the current di_next_unlinked field.
			bit = xfs_next_bit(buf_f->blf_data_map,
					   buf_f->blf_map_size, bit);
			 * If there are no more logged regions in the
			 * buffer, then we're done.
			nbits = xfs_contig_bits(buf_f->blf_data_map,
						buf_f->blf_map_size, bit);
			reg_buf_offset = bit << XFS_BLF_SHIFT;
			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
		 * If the current logged region starts after the current
		 * di_next_unlinked field, then move on to the next
		 * di_next_unlinked field.
		if (next_unlinked_offset < reg_buf_offset)
		ASSERT(item->ri_buf[item_index].i_addr != NULL);
		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
		ASSERT((reg_buf_offset + reg_buf_bytes) <=
							BBTOB(bp->b_io_length));
		 * The current logged region contains a copy of the
		 * current di_next_unlinked field.  Extract its value
		 * and copy it to the buffer copy.
		logged_nextp = item->ri_buf[item_index].i_addr +
				next_unlinked_offset - reg_buf_offset;
		if (unlikely(*logged_nextp == 0)) {
	"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
	"Trying to replay bad (0) inode di_next_unlinked field.",
			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
					 XFS_ERRLEVEL_LOW, mp);
			return XFS_ERROR(EFSCORRUPTED);
		buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
					      next_unlinked_offset);
		*buffer_nextp = *logged_nextp;
		 * If necessary, recalculate the CRC in the on-disk inode. We
		 * have to leave the inode in a consistent state for whoever
		 * reads it next.
		xfs_dinode_calc_crc(mp, (struct xfs_dinode *)
				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
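		/*
		 * Offset math example (illustrative, assuming 256 byte
		 * inodes): inode i = 3 in this buffer has its copy of
		 * di_next_unlinked at byte offset 3 * 256 +
		 * offsetof(xfs_dinode_t, di_next_unlinked); only that
		 * field is pulled from the logged regions, and the rest
		 * of the inode core is left untouched here.
		 */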
 * Validate the recovered buffer is of the correct type and attach the
 * appropriate buffer operations to them for writeback. Magic numbers are in a
 * few places:
 *	the first 16 bits of the buffer (inode buffer, dquot buffer),
 *	the first 32 bits of the buffer (most blocks),
 *	inside a struct xfs_da_blkinfo at the start of the buffer.
xlog_recovery_validate_buf_type(
	struct xfs_mount	*mp,
	xfs_buf_log_format_t	*buf_f)
	struct xfs_da_blkinfo	*info = bp->b_addr;
	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
	magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
	magicda = be16_to_cpu(info->magic);
	switch (xfs_blft_from_flags(buf_f)) {
	case XFS_BLFT_BTREE_BUF:
		case XFS_ABTB_CRC_MAGIC:
		case XFS_ABTC_CRC_MAGIC:
		case XFS_ABTB_MAGIC:
		case XFS_ABTC_MAGIC:
			bp->b_ops = &xfs_allocbt_buf_ops;
		case XFS_IBT_CRC_MAGIC:
			bp->b_ops = &xfs_inobt_buf_ops;
		case XFS_BMAP_CRC_MAGIC:
		case XFS_BMAP_MAGIC:
			bp->b_ops = &xfs_bmbt_buf_ops;
			xfs_warn(mp, "Bad btree block magic!");
	case XFS_BLFT_AGF_BUF:
		if (magic32 != XFS_AGF_MAGIC) {
			xfs_warn(mp, "Bad AGF block magic!");
		bp->b_ops = &xfs_agf_buf_ops;
	case XFS_BLFT_AGFL_BUF:
		if (!xfs_sb_version_hascrc(&mp->m_sb))
		if (magic32 != XFS_AGFL_MAGIC) {
			xfs_warn(mp, "Bad AGFL block magic!");
		bp->b_ops = &xfs_agfl_buf_ops;
	case XFS_BLFT_AGI_BUF:
		if (magic32 != XFS_AGI_MAGIC) {
			xfs_warn(mp, "Bad AGI block magic!");
		bp->b_ops = &xfs_agi_buf_ops;
	case XFS_BLFT_UDQUOT_BUF:
	case XFS_BLFT_PDQUOT_BUF:
	case XFS_BLFT_GDQUOT_BUF:
#ifdef CONFIG_XFS_QUOTA
		if (magic16 != XFS_DQUOT_MAGIC) {
			xfs_warn(mp, "Bad DQUOT block magic!");
		bp->b_ops = &xfs_dquot_buf_ops;
	"Trying to recover dquots without QUOTA support built in!");
	case XFS_BLFT_DINO_BUF:
		 * we get here with inode allocation buffers, not buffers that
		 * track unlinked list changes.
		if (magic16 != XFS_DINODE_MAGIC) {
			xfs_warn(mp, "Bad INODE block magic!");
		bp->b_ops = &xfs_inode_buf_ops;
	case XFS_BLFT_SYMLINK_BUF:
		if (magic32 != XFS_SYMLINK_MAGIC) {
			xfs_warn(mp, "Bad symlink block magic!");
		bp->b_ops = &xfs_symlink_buf_ops;
	case XFS_BLFT_DIR_BLOCK_BUF:
		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
			xfs_warn(mp, "Bad dir block magic!");
		bp->b_ops = &xfs_dir3_block_buf_ops;
	case XFS_BLFT_DIR_DATA_BUF:
		if (magic32 != XFS_DIR2_DATA_MAGIC &&
		    magic32 != XFS_DIR3_DATA_MAGIC) {
			xfs_warn(mp, "Bad dir data magic!");
		bp->b_ops = &xfs_dir3_data_buf_ops;
	case XFS_BLFT_DIR_FREE_BUF:
		if (magic32 != XFS_DIR2_FREE_MAGIC &&
		    magic32 != XFS_DIR3_FREE_MAGIC) {
			xfs_warn(mp, "Bad dir3 free magic!");
		bp->b_ops = &xfs_dir3_free_buf_ops;
	case XFS_BLFT_DIR_LEAF1_BUF:
		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
		    magicda != XFS_DIR3_LEAF1_MAGIC) {
			xfs_warn(mp, "Bad dir leaf1 magic!");
		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
	case XFS_BLFT_DIR_LEAFN_BUF:
		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
		    magicda != XFS_DIR3_LEAFN_MAGIC) {
			xfs_warn(mp, "Bad dir leafn magic!");
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
	case XFS_BLFT_DA_NODE_BUF:
		if (magicda != XFS_DA_NODE_MAGIC &&
		    magicda != XFS_DA3_NODE_MAGIC) {
			xfs_warn(mp, "Bad da node magic!");
		bp->b_ops = &xfs_da3_node_buf_ops;
	case XFS_BLFT_ATTR_LEAF_BUF:
		if (magicda != XFS_ATTR_LEAF_MAGIC &&
		    magicda != XFS_ATTR3_LEAF_MAGIC) {
			xfs_warn(mp, "Bad attr leaf magic!");
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
	case XFS_BLFT_ATTR_RMT_BUF:
		if (!xfs_sb_version_hascrc(&mp->m_sb))
		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
			xfs_warn(mp, "Bad attr remote magic!");
		bp->b_ops = &xfs_attr3_rmt_buf_ops;
	case XFS_BLFT_SB_BUF:
		if (magic32 != XFS_SB_MAGIC) {
			xfs_warn(mp, "Bad SB block magic!");
		bp->b_ops = &xfs_sb_buf_ops;
		xfs_warn(mp, "Unknown buffer type %d!",
			 xfs_blft_from_flags(buf_f));
 * Perform a 'normal' buffer recovery.  Each logged region of the
 * buffer should be copied over the corresponding region in the
 * given buffer.  The bitmap in the buf log format structure indicates
 * where to place the logged data.
xlog_recover_do_reg_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	xfs_buf_log_format_t	*buf_f)
	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
	i = 1;  /* 0 is the buf format structure */
		bit = xfs_next_bit(buf_f->blf_data_map,
				   buf_f->blf_map_size, bit);
		nbits = xfs_contig_bits(buf_f->blf_data_map,
					buf_f->blf_map_size, bit);
		ASSERT(item->ri_buf[i].i_addr != NULL);
		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
		ASSERT(BBTOB(bp->b_io_length) >=
		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
		 * The dirty regions logged in the buffer, even though
		 * contiguous, may span multiple chunks. This is because the
		 * dirty region may span a physical page boundary in a buffer
		 * and hence be split into two separate vectors for writing into
		 * the log. Hence we need to trim nbits back to the length of
		 * the current region being copied out of the log.
		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
		 * Do a sanity check if this is a dquot buffer. Just checking
		 * the first dquot in the buffer should do. XXXThis is
		 * probably a good thing to do for other buf types also.
		if (buf_f->blf_flags &
		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
			if (item->ri_buf[i].i_addr == NULL) {
				"XFS: NULL dquot in %s.", __func__);
			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
				"XFS: dquot too small (%d) in %s.",
					item->ri_buf[i].i_len, __func__);
			error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
					       -1, 0, XFS_QMOPT_DOWARN,
					       "dquot_buf_recover");
		memcpy(xfs_buf_offset(bp,
			(uint)bit << XFS_BLF_SHIFT),	/* dest */
			item->ri_buf[i].i_addr,		/* source */
			nbits<<XFS_BLF_SHIFT);		/* length */
	/* Shouldn't be any more regions */
	ASSERT(i == item->ri_total);
	 * We can only do post recovery validation on items on CRC enabled
	 * filesystems as we need to know when the buffer was written to be able
	 * to determine if we should have replayed the item. If we replay old
	 * metadata over a newer buffer, then it will enter a temporarily
	 * inconsistent state resulting in verification failures. Hence for now
	 * just avoid the verification stage for non-crc filesystems.
	if (xfs_sb_version_hascrc(&mp->m_sb))
		xlog_recovery_validate_buf_type(mp, bp, buf_f);
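	/*
	 * Bitmap granularity note (illustrative): blf_data_map is kept
	 * in XFS_BLF_CHUNK (128 byte) units, so a set bit b covers
	 * bytes [b << XFS_BLF_SHIFT, (b + 1) << XFS_BLF_SHIFT) of the
	 * buffer, and a run of nbits contiguous bits copies
	 * nbits << XFS_BLF_SHIFT bytes out of the log item.
	 */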
 * Do some primitive error checking on ondisk dquot data structures.
	struct xfs_mount	*mp,
	xfs_disk_dquot_t	*ddq,
	uint			type,	  /* used only when IO_dorepair is true */
	xfs_dqblk_t		*d = (xfs_dqblk_t *)ddq;
2256 * We can encounter an uninitialized dquot buffer for 2 reasons:
2257 * 1. If we crash while deleting the quotainode(s), and those blks got
2258 * used for user data. This is because we take the path of regular
2259 * file deletion; however, the size field of quotainodes is never
2260 * updated, so all the tricks that we play in itruncate_finish
2261 * don't quite matter.
2263 * 2. We don't replay the quota buffers when there's a quotaoff logitem.
2264 * But the allocation will be replayed so we'll end up with an
2265 * uninitialized quota block.
2267 * This is all fine; things are still consistent, and we haven't lost
2268 * any quota information. Just don't complain about bad dquot blks.
2270 if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
2271 if (flags & XFS_QMOPT_DOWARN)
2273 "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
2274 str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
2277 if (ddq->d_version != XFS_DQUOT_VERSION) {
2278 if (flags & XFS_QMOPT_DOWARN)
2280 "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
2281 str, id, ddq->d_version, XFS_DQUOT_VERSION);
2285 if (ddq->d_flags != XFS_DQ_USER &&
2286 ddq->d_flags != XFS_DQ_PROJ &&
2287 ddq->d_flags != XFS_DQ_GROUP) {
2288 if (flags & XFS_QMOPT_DOWARN)
2290 "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
2291 str, id, ddq->d_flags);
2295 if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
2296 if (flags & XFS_QMOPT_DOWARN)
2298 "%s : ondisk-dquot 0x%p, ID mismatch: "
2299 "0x%x expected, found id 0x%x",
2300 str, ddq, id, be32_to_cpu(ddq->d_id));
2304 if (!errs && ddq->d_id) {
2305 if (ddq->d_blk_softlimit &&
2306 be64_to_cpu(ddq->d_bcount) >
2307 be64_to_cpu(ddq->d_blk_softlimit)) {
2308 if (!ddq->d_btimer) {
2309 if (flags & XFS_QMOPT_DOWARN)
2311 "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
2312 str, (int)be32_to_cpu(ddq->d_id), ddq);
2316 if (ddq->d_ino_softlimit &&
2317 be64_to_cpu(ddq->d_icount) >
2318 be64_to_cpu(ddq->d_ino_softlimit)) {
2319 if (!ddq->d_itimer) {
2320 if (flags & XFS_QMOPT_DOWARN)
2322 "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
2323 str, (int)be32_to_cpu(ddq->d_id), ddq);
2327 if (ddq->d_rtb_softlimit &&
2328 be64_to_cpu(ddq->d_rtbcount) >
2329 be64_to_cpu(ddq->d_rtb_softlimit)) {
2330 if (!ddq->d_rtbtimer) {
2331 if (flags & XFS_QMOPT_DOWARN)
2333 "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
2334 str, (int)be32_to_cpu(ddq->d_id), ddq);
2340 if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2343 if (flags & XFS_QMOPT_DOWARN)
2344 xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
2347 * Typically, a repair is only requested by quotacheck.
2350 ASSERT(flags & XFS_QMOPT_DQREPAIR);
2351 memset(d, 0, sizeof(xfs_dqblk_t));
2353 d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2354 d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2355 d->dd_diskdq.d_flags = type;
2356 d->dd_diskdq.d_id = cpu_to_be32(id);
2358 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2359 uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
2360 xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
2368 * Perform a dquot buffer recovery.
2369 * Simple algorithm: if we have found a QUOTAOFF logitem of the same type
2370 * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
2371 * Else, treat it as a regular buffer and do recovery.
2374 xlog_recover_do_dquot_buffer(
2375 struct xfs_mount *mp,
2377 struct xlog_recover_item *item,
2379 struct xfs_buf_log_format *buf_f)
2383 trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2386 * Filesystems are required to send in quota flags at mount time.
2388 if (mp->m_qflags == 0) {
2393 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2394 type |= XFS_DQ_USER;
2395 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2396 type |= XFS_DQ_PROJ;
2397 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2398 type |= XFS_DQ_GROUP;
2400 * This type of quota was turned off, so ignore this buffer.
2402 if (log->l_quotaoffs_flag & type)
2405 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2409 * This routine replays a modification made to a buffer at runtime.
2410 * There are actually two types of buffer, regular and inode.
2411 * Inode buffers are handled differently
2412 * in that we only recover a specific set of data from them, namely
2413 * the inode di_next_unlinked fields. This is because all other inode
2414 * data is actually logged via inode records and any data we replay
2415 * here which overlaps that may be stale.
2417 * When meta-data buffers are freed at run time we log a buffer item
2418 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2419 * of the buffer in the log should not be replayed at recovery time.
2420 * This is so that if the blocks covered by the buffer are reused for
2421 * file data before we crash we don't end up replaying old, freed
2422 * meta-data into a user's file.
2424 * To handle the cancellation of buffer log items, we make two passes
2425 * over the log during recovery. During the first we build a table of
2426 * those buffers which have been cancelled, and during the second we
2427 * only replay those buffers which do not have corresponding cancel
2428 * records in the table. See xlog_recover_buffer_pass[1,2] above
2429 * for more details on the implementation of the table of cancel records.
2432 xlog_recover_buffer_pass2(
2434 struct list_head *buffer_list,
2435 struct xlog_recover_item *item)
2437 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2438 xfs_mount_t *mp = log->l_mp;
2444 * In this pass we only want to recover all the buffers which have
2445 * not been cancelled and are not cancellation buffers themselves.
2447 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2448 buf_f->blf_len, buf_f->blf_flags)) {
2449 trace_xfs_log_recover_buf_cancel(log, buf_f);
2453 trace_xfs_log_recover_buf_recover(log, buf_f);
2456 if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2457 buf_flags |= XBF_UNMAPPED;
2459 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2462 return XFS_ERROR(ENOMEM);
2463 error = bp->b_error;
2465 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2470 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2471 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2472 } else if (buf_f->blf_flags &
2473 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2474 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2476 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2479 return XFS_ERROR(error);
2482 * Perform delayed write on the buffer. Asynchronous writes will be
2483 * slower when taking into account all the buffers to be flushed.
2485 * Also make sure that only inode buffers with good sizes stay in
2486 * the buffer cache. The kernel moves inodes in buffers of 1 block
2487 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode
2488 * buffers in the log can be a different size if the log was generated
2489 * by an older kernel using unclustered inode buffers or a newer kernel
2490 * running with a different inode cluster size. Regardless, if
2491 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2492 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2493 * the buffer out of the buffer cache so that the buffer won't
2494 * overlap with future reads of those inodes.
2496 if (XFS_DINODE_MAGIC ==
2497 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2498 (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2499 (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2501 error = xfs_bwrite(bp);
2503 ASSERT(bp->b_target->bt_mount == mp);
2504 bp->b_iodone = xlog_recover_iodone;
2505 xfs_buf_delwri_queue(bp, buffer_list);
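
/*
 * A minimal standalone sketch of the two-pass cancel scheme described
 * above, with assumed names and a plain list in place of the kernel's
 * hashed, reference-counted xfs_buf_cancel table: pass 1 records every
 * buffer logged with the cancel flag, pass 2 looks each buffer up and
 * skips replay on a hit.
 */
#include <stdint.h>
#include <stdlib.h>

struct cancel_rec {
	uint64_t		blkno;
	uint32_t		len;
	struct cancel_rec	*next;
};

static struct cancel_rec *cancel_table;

/* pass 1: remember a cancelled buffer; returns 0, or -1 on ENOMEM */
static int record_cancel(uint64_t blkno, uint32_t len)
{
	struct cancel_rec *rec = malloc(sizeof(*rec));

	if (!rec)
		return -1;
	rec->blkno = blkno;
	rec->len = len;
	rec->next = cancel_table;
	cancel_table = rec;
	return 0;
}

/* pass 2: nonzero if replay of this buffer should be skipped */
static int buffer_cancelled(uint64_t blkno, uint32_t len)
{
	for (struct cancel_rec *rec = cancel_table; rec; rec = rec->next)
		if (rec->blkno == blkno && rec->len == len)
			return 1;
	return 0;
}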
2513 xlog_recover_inode_pass2(
2515 struct list_head *buffer_list,
2516 struct xlog_recover_item *item)
2518 xfs_inode_log_format_t *in_f;
2519 xfs_mount_t *mp = log->l_mp;
2528 xfs_icdinode_t *dicp;
2532 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2533 in_f = item->ri_buf[0].i_addr;
2535 in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2537 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2543 * Inode buffers can be freed, look out for it,
2544 * and do not replay the inode.
2546 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2547 in_f->ilf_len, 0)) {
2549 trace_xfs_log_recover_inode_cancel(log, in_f);
2552 trace_xfs_log_recover_inode_recover(log, in_f);
2554 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2555 &xfs_inode_buf_ops);
2560 error = bp->b_error;
2562 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2566 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2567 dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2570 * Make sure the place we're flushing out to really looks like an inode!
2573 if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
2576 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2577 __func__, dip, bp, in_f->ilf_ino);
2578 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2579 XFS_ERRLEVEL_LOW, mp);
2580 error = EFSCORRUPTED;
2583 dicp = item->ri_buf[1].i_addr;
2584 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2587 "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2588 __func__, item, in_f->ilf_ino);
2589 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2590 XFS_ERRLEVEL_LOW, mp);
2591 error = EFSCORRUPTED;
2596 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
2597 * are transactional and if ordering is necessary we can determine that
2598 * more accurately by the LSN field in the V3 inode core. Don't trust
2599 * the inode versions as we might be changing them here - use the
2600 * superblock flag to determine whether we need to look at di_flushiter
2601 * to skip replay when the on-disk inode is newer than the log one.
2603 if (!xfs_sb_version_hascrc(&mp->m_sb) &&
2604 dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2606 * Deal with the counter wrap case: an on-disk value of
2607 * DI_MAX_FLUSH means a smaller log value is actually newer.
2609 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2610 dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2614 trace_xfs_log_recover_inode_skip(log, in_f);
2620 /* Take the opportunity to reset the flush iteration count */
2621 dicp->di_flushiter = 0;
2623 if (unlikely(S_ISREG(dicp->di_mode))) {
2624 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2625 (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2626 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2627 XFS_ERRLEVEL_LOW, mp, dicp);
2630 "%s: Bad regular inode log record, rec ptr 0x%p, "
2631 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2632 __func__, item, dip, bp, in_f->ilf_ino);
2633 error = EFSCORRUPTED;
2636 } else if (unlikely(S_ISDIR(dicp->di_mode))) {
2637 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2638 (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2639 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2640 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
2641 XFS_ERRLEVEL_LOW, mp, dicp);
2644 "%s: Bad dir inode log record, rec ptr 0x%p, "
2645 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2646 __func__, item, dip, bp, in_f->ilf_ino);
2647 error = EFSCORRUPTED;
2651 if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)) {
2652 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
2653 XFS_ERRLEVEL_LOW, mp, dicp);
2656 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2657 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2658 __func__, item, dip, bp, in_f->ilf_ino,
2659 dicp->di_nextents + dicp->di_anextents,
2661 error = EFSCORRUPTED;
2664 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2665 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
2666 XFS_ERRLEVEL_LOW, mp, dicp);
2669 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2670 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
2671 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
2672 error = EFSCORRUPTED;
2675 isize = xfs_icdinode_size(dicp->di_version);
2676 if (unlikely(item->ri_buf[1].i_len > isize)) {
2677 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
2678 XFS_ERRLEVEL_LOW, mp, dicp);
2681 "%s: Bad inode log record length %d, rec ptr 0x%p",
2682 __func__, item->ri_buf[1].i_len, item);
2683 error = EFSCORRUPTED;
2687 /* The core is in in-core format */
2688 xfs_dinode_to_disk(dip, dicp);
2690 /* the rest is in on-disk format */
2691 if (item->ri_buf[1].i_len > isize) {
2692 memcpy((char *)dip + isize,
2693 item->ri_buf[1].i_addr + isize,
2694 item->ri_buf[1].i_len - isize);
2697 fields = in_f->ilf_fields;
2698 switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2700 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2703 memcpy(XFS_DFORK_DPTR(dip),
2704 &in_f->ilf_u.ilfu_uuid,
2709 if (in_f->ilf_size == 2)
2710 goto write_inode_buffer;
2711 len = item->ri_buf[2].i_len;
2712 src = item->ri_buf[2].i_addr;
2713 ASSERT(in_f->ilf_size <= 4);
2714 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2715 ASSERT(!(fields & XFS_ILOG_DFORK) ||
2716 (len == in_f->ilf_dsize));
2718 switch (fields & XFS_ILOG_DFORK) {
2719 case XFS_ILOG_DDATA:
2721 memcpy(XFS_DFORK_DPTR(dip), src, len);
2724 case XFS_ILOG_DBROOT:
2725 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2726 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2727 XFS_DFORK_DSIZE(dip, mp));
2732 * There are no data fork flags set.
2734 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2739 * If we logged any attribute data, recover it. There may or
2740 * may not have been any other non-core data logged in this transaction.
2743 if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2744 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2749 len = item->ri_buf[attr_index].i_len;
2750 src = item->ri_buf[attr_index].i_addr;
2751 ASSERT(len == in_f->ilf_asize);
2753 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
2754 case XFS_ILOG_ADATA:
2756 dest = XFS_DFORK_APTR(dip);
2757 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
2758 memcpy(dest, src, len);
2761 case XFS_ILOG_ABROOT:
2762 dest = XFS_DFORK_APTR(dip);
2763 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
2764 len, (xfs_bmdr_block_t*)dest,
2765 XFS_DFORK_ASIZE(dip, mp));
2769 xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
2778 /* re-generate the checksum. */
2779 xfs_dinode_calc_crc(log->l_mp, dip);
2781 ASSERT(bp->b_target->bt_mount == mp);
2782 bp->b_iodone = xlog_recover_iodone;
2783 xfs_buf_delwri_queue(bp, buffer_list);
2788 return XFS_ERROR(error);
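
/*
 * A standalone restatement of the di_flushiter test earlier in this
 * function, with an assumed 16-bit counter width: replay is skipped
 * when the on-disk inode is newer than the log copy, except that a
 * wrapped counter (disk at the maximum, log near zero) means the log
 * copy actually post-dates the disk and must still be replayed.
 */
#include <stdint.h>
#include <stdbool.h>

#define MAX_FLUSH	0xffffU		/* stands in for DI_MAX_FLUSH */

/* true if the on-disk inode is newer and replay should be skipped */
static bool skip_inode_replay(uint16_t log_iter, uint16_t disk_iter)
{
	if (log_iter >= disk_iter)
		return false;			/* log copy is newer */
	if (disk_iter == MAX_FLUSH && log_iter < (MAX_FLUSH >> 1))
		return false;			/* counter wrapped */
	return true;				/* disk copy is newer */
}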
2792 * Recover QUOTAOFF records. We simply make a note of it in the xlog
2793 * structure, so that we know not to do any dquot item or dquot buffer recovery of that type.
2797 xlog_recover_quotaoff_pass1(
2799 struct xlog_recover_item *item)
2801 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
2805 * The logitem format's flag tells us if this was user quotaoff,
2806 * group/project quotaoff or both.
2808 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
2809 log->l_quotaoffs_flag |= XFS_DQ_USER;
2810 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
2811 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
2812 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
2813 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
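
/*
 * A minimal sketch, with assumed flag values, of how the quotaoff state
 * recorded above is consumed: pass 1 reduces each QUOTAOFF record to a
 * per-type bitmask, and the dquot replay paths simply AND the item's
 * type against it to decide whether to toss the item.
 */
#include <stdbool.h>

enum { DQ_USER = 1, DQ_PROJ = 2, DQ_GROUP = 4 };

static unsigned int quotaoffs_flag;	/* stands in for log->l_quotaoffs_flag */

/* pass 1: note which quota types were turned off */
static void note_quotaoff(bool user, bool proj, bool group)
{
	if (user)
		quotaoffs_flag |= DQ_USER;
	if (proj)
		quotaoffs_flag |= DQ_PROJ;
	if (group)
		quotaoffs_flag |= DQ_GROUP;
}

/* pass 2: skip replay of any dquot whose type was turned off */
static bool quota_type_off(unsigned int type)
{
	return (quotaoffs_flag & type) != 0;
}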
2819 * Recover a dquot record
2822 xlog_recover_dquot_pass2(
2824 struct list_head *buffer_list,
2825 struct xlog_recover_item *item)
2827 xfs_mount_t *mp = log->l_mp;
2829 struct xfs_disk_dquot *ddq, *recddq;
2831 xfs_dq_logformat_t *dq_f;
2836 * Filesystems are required to send in quota flags at mount time.
2838 if (mp->m_qflags == 0)
2841 recddq = item->ri_buf[1].i_addr;
2842 if (recddq == NULL) {
2843 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
2844 return XFS_ERROR(EIO);
2846 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
2847 xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
2848 item->ri_buf[1].i_len, __func__);
2849 return XFS_ERROR(EIO);
2853 * This type of quota was turned off, so ignore this record.
2855 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
2857 if (log->l_quotaoffs_flag & type)
2861 * At this point we know that quota was _not_ turned off.
2862 * Since the mount flags are not indicating to us otherwise, this
2863 * must mean that quota is on, and the dquot needs to be replayed.
2864 * Remember that we may not have fully recovered the superblock yet,
2865 * so we can't do the usual trick of looking at the SB quota bits.
2867 * The other possibility, of course, is that the quota subsystem was
2868 * removed since the last mount - ENOSYS.
2870 dq_f = item->ri_buf[0].i_addr;
2872 error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2873 "xlog_recover_dquot_pass2 (log copy)");
2875 return XFS_ERROR(EIO);
2876 ASSERT(dq_f->qlf_len == 1);
2878 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
2879 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
2885 ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
2888 * At least the magic num portion should be on disk because this
2889 * was among a chunk of dquots created earlier, and we did some
2890 * minimal initialization then.
2892 error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
2893 "xlog_recover_dquot_pass2");
2896 return XFS_ERROR(EIO);
2899 memcpy(ddq, recddq, item->ri_buf[1].i_len);
2900 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2901 xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
2905 ASSERT(dq_f->qlf_size == 2);
2906 ASSERT(bp->b_target->bt_mount == mp);
2907 bp->b_iodone = xlog_recover_iodone;
2908 xfs_buf_delwri_queue(bp, buffer_list);
2915 * This routine is called to create an in-core extent free intent
2916 * item from the efi format structure which was logged on disk.
2917 * It allocates an in-core efi, copies the extents from the format
2918 * structure into it, and adds the efi to the AIL with the given LSN.
2922 xlog_recover_efi_pass2(
2924 struct xlog_recover_item *item,
2928 xfs_mount_t *mp = log->l_mp;
2929 xfs_efi_log_item_t *efip;
2930 xfs_efi_log_format_t *efi_formatp;
2932 efi_formatp = item->ri_buf[0].i_addr;
2934 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
2935 if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
2936 &(efip->efi_format)))) {
2937 xfs_efi_item_free(efip);
2940 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
2942 spin_lock(&log->l_ailp->xa_lock);
2944 * xfs_trans_ail_update() drops the AIL lock.
2946 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
2952 * This routine is called when an efd format structure is found in
2953 * a committed transaction in the log. Its purpose is to cancel
2954 * the corresponding efi if it was still in the log. To do this
2955 * it searches the AIL for the efi with an id equal to that in the
2956 * efd format structure. If we find it, we remove the efi from the AIL and free it.
2960 xlog_recover_efd_pass2(
2962 struct xlog_recover_item *item)
2964 xfs_efd_log_format_t *efd_formatp;
2965 xfs_efi_log_item_t *efip = NULL;
2966 xfs_log_item_t *lip;
2968 struct xfs_ail_cursor cur;
2969 struct xfs_ail *ailp = log->l_ailp;
2971 efd_formatp = item->ri_buf[0].i_addr;
2972 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
2973 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
2974 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
2975 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
2976 efi_id = efd_formatp->efd_efi_id;
2979 * Search for the efi with the id in the efd format structure
2982 spin_lock(&ailp->xa_lock);
2983 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2984 while (lip != NULL) {
2985 if (lip->li_type == XFS_LI_EFI) {
2986 efip = (xfs_efi_log_item_t *)lip;
2987 if (efip->efi_format.efi_id == efi_id) {
2989 * xfs_trans_ail_delete() drops the
2992 xfs_trans_ail_delete(ailp, lip,
2993 SHUTDOWN_CORRUPT_INCORE);
2994 xfs_efi_item_free(efip);
2995 spin_lock(&ailp->xa_lock);
2999 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3001 xfs_trans_ail_cursor_done(ailp, &cur);
3002 spin_unlock(&ailp->xa_lock);
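
/*
 * A simplified standalone sketch of the intent/done pairing above, with
 * the AIL reduced to a singly linked list of assumed 'struct intent'
 * records: an EFD is just an id match that removes and frees the
 * corresponding outstanding EFI.
 */
#include <stdint.h>
#include <stdlib.h>

struct intent {
	uint64_t	id;
	struct intent	*next;
};

/* drop the intent matching a "done" record; returns 1 if one was found */
static int cancel_intent(struct intent **list, uint64_t done_id)
{
	for (struct intent **pp = list; *pp; pp = &(*pp)->next) {
		if ((*pp)->id == done_id) {
			struct intent *found = *pp;

			*pp = found->next;
			free(found);
			return 1;
		}
	}
	return 0;
}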
3008 * This routine is called when an inode create format structure is found in a
3009 * committed transaction in the log. Its purpose is to initialise the inodes
3010 * being allocated on disk. This requires us to get inode cluster buffers that
3011 * match the range to be initialised, stamped with inode templates and written
3012 * by delayed write so that subsequent modifications will hit the cached buffer
3013 * and only need writing out at the end of recovery.
3016 xlog_recover_do_icreate_pass2(
3018 struct list_head *buffer_list,
3019 xlog_recover_item_t *item)
3021 struct xfs_mount *mp = log->l_mp;
3022 struct xfs_icreate_log *icl;
3023 xfs_agnumber_t agno;
3024 xfs_agblock_t agbno;
3027 xfs_agblock_t length;
3029 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3030 if (icl->icl_type != XFS_LI_ICREATE) {
3031 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3035 if (icl->icl_size != 1) {
3036 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3040 agno = be32_to_cpu(icl->icl_ag);
3041 if (agno >= mp->m_sb.sb_agcount) {
3042 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3045 agbno = be32_to_cpu(icl->icl_agbno);
3046 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3047 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3050 isize = be32_to_cpu(icl->icl_isize);
3051 if (isize != mp->m_sb.sb_inodesize) {
3052 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3055 count = be32_to_cpu(icl->icl_count);
3057 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3060 length = be32_to_cpu(icl->icl_length);
3061 if (!length || length >= mp->m_sb.sb_agblocks) {
3062 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3066 /* existing allocation is fixed value */
3067 ASSERT(count == XFS_IALLOC_INODES(mp));
3068 ASSERT(length == XFS_IALLOC_BLOCKS(mp));
3069 if (count != XFS_IALLOC_INODES(mp) ||
3070 length != XFS_IALLOC_BLOCKS(mp)) {
3071 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2");
3076 * Inode buffers can be freed. Do not replay the inode initialisation as
3077 * we could be overwriting something written after this inode buffer was cancelled.
3080 * XXX: we need to iterate all buffers and only init those that are not
3081 * cancelled. I think that a more fine-grained factoring of
3082 * xfs_ialloc_inode_init may be appropriate here to enable this to be done.
3085 if (xlog_check_buffer_cancelled(log,
3086 XFS_AGB_TO_DADDR(mp, agno, agbno), length, 0))
3089 xfs_ialloc_inode_init(mp, NULL, buffer_list, agno, agbno, length,
3090 be32_to_cpu(icl->icl_gen));
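
/*
 * A standalone sketch of the range validation above, against assumed
 * geometry fields; it is slightly stricter than the checks in this
 * function (it bounds agbno + len rather than len alone), but the idea
 * is the same: every field of the icreate record must fall inside the
 * filesystem before any buffers are stamped.
 */
#include <stdint.h>
#include <stdbool.h>

struct fs_geom {
	uint32_t	agcount;	/* number of allocation groups */
	uint32_t	agblocks;	/* blocks per AG */
	uint32_t	inodesize;	/* bytes per inode */
};

static bool icreate_args_valid(const struct fs_geom *g, uint32_t agno,
			       uint32_t agbno, uint32_t len, uint32_t isize)
{
	if (agno >= g->agcount)
		return false;			/* bad AG number */
	if (agbno == 0 || agbno >= g->agblocks)
		return false;			/* bad AG block offset */
	if (len == 0 || agbno + len > g->agblocks)
		return false;			/* bad extent length */
	return isize == g->inodesize;		/* inode size must match */
}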
3095 * Free up any resources allocated by the transaction
3097 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
3100 xlog_recover_free_trans(
3101 struct xlog_recover *trans)
3103 xlog_recover_item_t *item, *n;
3106 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
3107 /* Free the regions in the item. */
3108 list_del(&item->ri_list);
3109 for (i = 0; i < item->ri_cnt; i++)
3110 kmem_free(item->ri_buf[i].i_addr);
3111 /* Free the item itself */
3112 kmem_free(item->ri_buf);
3115 /* Free the transaction recover structure */
3120 xlog_recover_commit_pass1(
3122 struct xlog_recover *trans,
3123 struct xlog_recover_item *item)
3125 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3127 switch (ITEM_TYPE(item)) {
3129 return xlog_recover_buffer_pass1(log, item);
3130 case XFS_LI_QUOTAOFF:
3131 return xlog_recover_quotaoff_pass1(log, item);
3136 case XFS_LI_ICREATE:
3137 /* nothing to do in pass 1 */
3140 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3141 __func__, ITEM_TYPE(item));
3143 return XFS_ERROR(EIO);
3148 xlog_recover_commit_pass2(
3150 struct xlog_recover *trans,
3151 struct list_head *buffer_list,
3152 struct xlog_recover_item *item)
3154 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
3156 switch (ITEM_TYPE(item)) {
3158 return xlog_recover_buffer_pass2(log, buffer_list, item);
3160 return xlog_recover_inode_pass2(log, buffer_list, item);
3162 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
3164 return xlog_recover_efd_pass2(log, item);
3166 return xlog_recover_dquot_pass2(log, buffer_list, item);
3167 case XFS_LI_ICREATE:
3168 return xlog_recover_do_icreate_pass2(log, buffer_list, item);
3169 case XFS_LI_QUOTAOFF:
3170 /* nothing to do in pass2 */
3173 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3174 __func__, ITEM_TYPE(item));
3176 return XFS_ERROR(EIO);
3181 * Perform the transaction.
3183 * If the transaction modifies a buffer or inode, do it now. Otherwise,
3184 * EFIs and EFDs get queued up by adding entries into the AIL for them.
3187 xlog_recover_commit_trans(
3189 struct xlog_recover *trans,
3192 int error = 0, error2;
3193 xlog_recover_item_t *item;
3194 LIST_HEAD (buffer_list);
3196 hlist_del(&trans->r_list);
3198 error = xlog_recover_reorder_trans(log, trans, pass);
3202 list_for_each_entry(item, &trans->r_itemq, ri_list) {
3204 case XLOG_RECOVER_PASS1:
3205 error = xlog_recover_commit_pass1(log, trans, item);
3207 case XLOG_RECOVER_PASS2:
3208 error = xlog_recover_commit_pass2(log, trans,
3209 &buffer_list, item);
3219 xlog_recover_free_trans(trans);
3222 error2 = xfs_buf_delwri_submit(&buffer_list);
3223 return error ? error : error2;
3227 xlog_recover_unmount_trans(
3229 struct xlog_recover *trans)
3231 /* Do nothing now */
3232 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
3237 * There are two valid states of the r_state field. 0 indicates that the
3238 * transaction structure is in a normal state. We have either seen the
3239 * start of the transaction or the last operation we added was not a partial
3240 * operation. If the last operation we added to the transaction was a
3241 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
3243 * NOTE: skip LRs with 0 data length.
3246 xlog_recover_process_data(
3248 struct hlist_head rhash[],
3249 struct xlog_rec_header *rhead,
3255 xlog_op_header_t *ohead;
3256 xlog_recover_t *trans;
3262 lp = dp + be32_to_cpu(rhead->h_len);
3263 num_logops = be32_to_cpu(rhead->h_num_logops);
3265 /* check the log format matches our own - else we can't recover */
3266 if (xlog_header_check_recover(log->l_mp, rhead))
3267 return (XFS_ERROR(EIO));
3269 while ((dp < lp) && num_logops) {
3270 ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
3271 ohead = (xlog_op_header_t *)dp;
3272 dp += sizeof(xlog_op_header_t);
3273 if (ohead->oh_clientid != XFS_TRANSACTION &&
3274 ohead->oh_clientid != XFS_LOG) {
3275 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
3276 __func__, ohead->oh_clientid);
3278 return (XFS_ERROR(EIO));
3280 tid = be32_to_cpu(ohead->oh_tid);
3281 hash = XLOG_RHASH(tid);
3282 trans = xlog_recover_find_tid(&rhash[hash], tid);
3283 if (trans == NULL) { /* not found; add new tid */
3284 if (ohead->oh_flags & XLOG_START_TRANS)
3285 xlog_recover_new_tid(&rhash[hash], tid,
3286 be64_to_cpu(rhead->h_lsn));
3288 if (dp + be32_to_cpu(ohead->oh_len) > lp) {
3289 xfs_warn(log->l_mp, "%s: bad length 0x%x",
3290 __func__, be32_to_cpu(ohead->oh_len));
3292 return (XFS_ERROR(EIO));
3294 flags = ohead->oh_flags & ~XLOG_END_TRANS;
3295 if (flags & XLOG_WAS_CONT_TRANS)
3296 flags &= ~XLOG_CONTINUE_TRANS;
3298 case XLOG_COMMIT_TRANS:
3299 error = xlog_recover_commit_trans(log,
3302 case XLOG_UNMOUNT_TRANS:
3303 error = xlog_recover_unmount_trans(log, trans);
3305 case XLOG_WAS_CONT_TRANS:
3306 error = xlog_recover_add_to_cont_trans(log,
3308 be32_to_cpu(ohead->oh_len));
3310 case XLOG_START_TRANS:
3311 xfs_warn(log->l_mp, "%s: bad transaction",
3314 error = XFS_ERROR(EIO);
3317 case XLOG_CONTINUE_TRANS:
3318 error = xlog_recover_add_to_trans(log, trans,
3319 dp, be32_to_cpu(ohead->oh_len));
3322 xfs_warn(log->l_mp, "%s: bad flag 0x%x",
3325 error = XFS_ERROR(EIO);
3331 dp += be32_to_cpu(ohead->oh_len);
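
/*
 * A minimal sketch of the record walk above, with hypothetical
 * structures: each operation carries its own header, the payload length
 * tells us where the next header starts, and the tid hashes into a
 * table of in-progress transactions. (On disk these fields are
 * big-endian; byte swapping is omitted here.)
 */
#include <stdint.h>
#include <stddef.h>

struct op_header {
	uint32_t	tid;	/* owning transaction */
	uint32_t	len;	/* payload bytes that follow */
	uint8_t		flags;	/* start/commit/continue/unmount */
};

#define RHASH_SIZE	16	/* the kernel uses XLOG_RHASH() */

static unsigned int tid_hash(uint32_t tid)
{
	return tid % RHASH_SIZE;
}

/* walk [dp, lp); returns 0, or -1 on a malformed record */
static int walk_ops(const char *dp, const char *lp)
{
	while (dp + sizeof(struct op_header) <= lp) {
		const struct op_header *oh = (const void *)dp;

		dp += sizeof(*oh);
		if (dp + oh->len > lp)
			return -1;	/* payload overruns the record */
		/* look up tid_hash(oh->tid), dispatch on oh->flags ... */
		dp += oh->len;
	}
	return 0;
}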
3338 * Process an extent free intent item that was recovered from
3339 * the log. We need to free the extents that it describes.
3342 xlog_recover_process_efi(
3344 xfs_efi_log_item_t *efip)
3346 xfs_efd_log_item_t *efdp;
3351 xfs_fsblock_t startblock_fsb;
3353 ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
3356 * First check the validity of the extents described by the
3357 * EFI. If any are bad, then assume that all are bad and
3358 * just toss the EFI.
3360 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3361 extp = &(efip->efi_format.efi_extents[i]);
3362 startblock_fsb = XFS_BB_TO_FSB(mp,
3363 XFS_FSB_TO_DADDR(mp, extp->ext_start));
3364 if ((startblock_fsb == 0) ||
3365 (extp->ext_len == 0) ||
3366 (startblock_fsb >= mp->m_sb.sb_dblocks) ||
3367 (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3369 * This will pull the EFI from the AIL and
3370 * free the memory associated with it.
3372 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3373 xfs_efi_release(efip, efip->efi_format.efi_nextents);
3374 return XFS_ERROR(EIO);
3378 tp = xfs_trans_alloc(mp, 0);
3379 error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, 0, 0);
3382 efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3384 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3385 extp = &(efip->efi_format.efi_extents[i]);
3386 error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3389 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3393 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3394 error = xfs_trans_commit(tp, 0);
3398 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3403 * When this is called, all of the EFIs which did not have
3404 * corresponding EFDs should be in the AIL. What we do now
3405 * is free the extents associated with each one.
3407 * Since we process the EFIs in normal transactions, they
3408 * will be removed at some point after the commit. This prevents
3409 * us from just walking down the list processing each one.
3410 * We'll use a flag in the EFI to skip those that we've already
3411 * processed and use the AIL iteration mechanism's generation
3412 * count to try to speed this up at least a bit.
3414 * When we start, we know that the EFIs are the only things in
3415 * the AIL. As we process them, however, other items are added
3416 * to the AIL. Since everything added to the AIL must come after
3417 * everything already in the AIL, we stop processing as soon as
3418 * we see something other than an EFI in the AIL.
3421 xlog_recover_process_efis(
3424 xfs_log_item_t *lip;
3425 xfs_efi_log_item_t *efip;
3427 struct xfs_ail_cursor cur;
3428 struct xfs_ail *ailp;
3431 spin_lock(&ailp->xa_lock);
3432 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3433 while (lip != NULL) {
3435 * We're done when we see something other than an EFI.
3436 * There should be no EFIs left in the AIL now.
3438 if (lip->li_type != XFS_LI_EFI) {
3440 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3441 ASSERT(lip->li_type != XFS_LI_EFI);
3447 * Skip EFIs that we've already processed.
3449 efip = (xfs_efi_log_item_t *)lip;
3450 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
3451 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3455 spin_unlock(&ailp->xa_lock);
3456 error = xlog_recover_process_efi(log->l_mp, efip);
3457 spin_lock(&ailp->xa_lock);
3460 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3463 xfs_trans_ail_cursor_done(ailp, &cur);
3464 spin_unlock(&ailp->xa_lock);
3469 * This routine performs a transaction to null out a bad inode pointer
3470 * in an agi unlinked inode hash bucket.
3473 xlog_recover_clear_agi_bucket(
3475 xfs_agnumber_t agno,
3484 tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3485 error = xfs_trans_reserve(tp, 0, XFS_CLEAR_AGI_BUCKET_LOG_RES(mp),
3490 error = xfs_read_agi(mp, tp, agno, &agibp);
3494 agi = XFS_BUF_TO_AGI(agibp);
3495 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3496 offset = offsetof(xfs_agi_t, agi_unlinked) +
3497 (sizeof(xfs_agino_t) * bucket);
3498 xfs_trans_log_buf(tp, agibp, offset,
3499 (offset + sizeof(xfs_agino_t) - 1));
3501 error = xfs_trans_commit(tp, 0);
3507 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3509 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
3514 xlog_recover_process_one_iunlink(
3515 struct xfs_mount *mp,
3516 xfs_agnumber_t agno,
3520 struct xfs_buf *ibp;
3521 struct xfs_dinode *dip;
3522 struct xfs_inode *ip;
3526 ino = XFS_AGINO_TO_INO(mp, agno, agino);
3527 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3532 * Get the on disk inode to find the next inode in the bucket.
3534 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
3538 ASSERT(ip->i_d.di_nlink == 0);
3539 ASSERT(ip->i_d.di_mode != 0);
3541 /* setup for the next pass */
3542 agino = be32_to_cpu(dip->di_next_unlinked);
3546 * Prevent any DMAPI event from being sent when the reference on
3547 * the inode is dropped.
3549 ip->i_d.di_dmevmask = 0;
3558 * We can't read in the inode this bucket points to, or this inode
3559 * is messed up. Just ditch this bucket of inodes. We will lose
3560 * some inodes and space, but at least we won't hang.
3562 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3563 * clear the inode pointer in the bucket.
3565 xlog_recover_clear_agi_bucket(mp, agno, bucket);
3570 * xlog_iunlink_recover
3572 * This is called during recovery to process any inodes which
3573 * we unlinked but not freed when the system crashed. These
3574 * inodes will be on the lists in the AGI blocks. What we do
3575 * here is scan all the AGIs and fully truncate and free any
3576 * inodes found on the lists. Each inode is removed from the
3577 * lists when it has been fully truncated and is freed. The
3578 * freeing of the inode and its removal from the list must be atomic.
3582 xlog_recover_process_iunlinks(
3586 xfs_agnumber_t agno;
3597 * Prevent any DMAPI event from being sent while in this function.
3599 mp_dmevmask = mp->m_dmevmask;
3602 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3604 * Find the agi for this ag.
3606 error = xfs_read_agi(mp, NULL, agno, &agibp);
3609 * AGI is b0rked. Don't process it.
3611 * We should probably mark the filesystem as corrupt
3612 * after we've recovered all the AGs we can....
3617 * Unlock the buffer so that it can be acquired in the normal
3618 * course of the transaction to truncate and free each inode.
3619 * Because we are not racing with anyone else here for the AGI
3620 * buffer, we don't even need to hold it locked to read the
3621 * initial unlinked bucket entries out of the buffer. We keep
3622 * buffer reference though, so that it stays pinned in memory
3623 * while we need the buffer.
3625 agi = XFS_BUF_TO_AGI(agibp);
3626 xfs_buf_unlock(agibp);
3628 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
3629 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
3630 while (agino != NULLAGINO) {
3631 agino = xlog_recover_process_one_iunlink(mp,
3632 agno, agino, bucket);
3635 xfs_buf_rele(agibp);
3638 mp->m_dmevmask = mp_dmevmask;
3642 * Unpack the log buffer data and CRC check it. If the check fails, issue a
3643 * warning if and only if the CRC in the header is non-zero. This makes the
3644 * check an advisory warning, and the zero CRC check will prevent failure
3645 * warnings from being emitted when upgrading the kernel from one that does not
3646 * add CRCs by default.
3648 * When filesystems are CRC enabled, this CRC mismatch becomes a fatal log
3649 * corruption failure.
3652 xlog_unpack_data_crc(
3653 struct xlog_rec_header *rhead,
3659 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
3660 if (crc != rhead->h_crc) {
3661 if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
3662 xfs_alert(log->l_mp,
3663 "log record CRC mismatch: found 0x%x, expected 0x%x.\n",
3664 le32_to_cpu(rhead->h_crc),
3666 xfs_hex_dump(dp, 32);
3670 * If we've detected a log record corruption, then we can't
3671 * recover past this point. Abort recovery if we are enforcing
3672 * CRC protection by punting an error back up the stack.
3674 if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
3675 return EFSCORRUPTED;
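
/*
 * A standalone restatement of the policy above, with an assumed return
 * convention: any mismatch against a non-zero stored CRC earns a
 * warning, but only a CRC-enabled filesystem treats the mismatch as
 * fatal; a zero stored CRC on a pre-CRC log stays silent.
 */
#include <stdint.h>
#include <stdbool.h>

/* 0 = ok, 1 = advisory warning only, -1 = fatal corruption */
static int check_record_crc(uint32_t stored, uint32_t computed,
			    bool fs_has_crc)
{
	if (computed == stored)
		return 0;			/* good record */
	if (fs_has_crc)
		return -1;			/* warn and abort recovery */
	return stored != 0 ? 1 : 0;		/* warn, or stay silent */
}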
3683 struct xlog_rec_header *rhead,
3690 error = xlog_unpack_data_crc(rhead, dp, log);
3694 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
3695 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
3696 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
3700 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3701 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
3702 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
3703 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3704 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
3705 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
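
/*
 * A minimal sketch of the unpacking above, with assumed sizes: before a
 * record is written, the first word of each 512-byte basic block in the
 * body is overwritten with the cycle number (so torn writes can be
 * detected) and the originals are stashed in the record header;
 * unpacking just puts them back. The kernel additionally spills into
 * extended headers once h_cycle_data[] fills, as the v2 loop shows.
 */
#include <stdint.h>
#include <string.h>

#define BB_SIZE	512	/* basic block size, as with BBSIZE */

static void restore_cycle_words(char *body, int nblocks,
				const uint32_t *saved)
{
	for (int i = 0; i < nblocks; i++)
		memcpy(body + (size_t)i * BB_SIZE, &saved[i],
		       sizeof(uint32_t));
}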
3714 xlog_valid_rec_header(
3716 struct xlog_rec_header *rhead,
3721 if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
3722 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
3723 XFS_ERRLEVEL_LOW, log->l_mp);
3724 return XFS_ERROR(EFSCORRUPTED);
3727 (!rhead->h_version ||
3728 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
3729 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
3730 __func__, be32_to_cpu(rhead->h_version));
3731 return XFS_ERROR(EIO);
3734 /* LR body must have data or it wouldn't have been written */
3735 hlen = be32_to_cpu(rhead->h_len);
3736 if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
3737 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
3738 XFS_ERRLEVEL_LOW, log->l_mp);
3739 return XFS_ERROR(EFSCORRUPTED);
3741 if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
3742 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
3743 XFS_ERRLEVEL_LOW, log->l_mp);
3744 return XFS_ERROR(EFSCORRUPTED);
3750 * Read the log from tail to head and process the log records found.
3751 * Handle the two cases where the tail and head are in the same cycle
3752 * and where the active portion of the log wraps around the end of
3753 * the physical log separately. The pass parameter is passed through
3754 * to the routines called to process the data and is not looked at by this routine.
3758 xlog_do_recovery_pass(
3760 xfs_daddr_t head_blk,
3761 xfs_daddr_t tail_blk,
3764 xlog_rec_header_t *rhead;
3767 xfs_buf_t *hbp, *dbp;
3768 int error = 0, h_size;
3769 int bblks, split_bblks;
3770 int hblks, split_hblks, wrapped_hblks;
3771 struct hlist_head rhash[XLOG_RHASH_SIZE];
3773 ASSERT(head_blk != tail_blk);
3776 * Read the header of the tail block and get the iclog buffer size from
3777 * h_size. Use this to tell how many sectors make up the log header.
3779 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
3781 * When using variable length iclogs, read first sector of
3782 * iclog header and extract the header size from it. Get a
3783 * new hbp that is the correct size.
3785 hbp = xlog_get_bp(log, 1);
3789 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3793 rhead = (xlog_rec_header_t *)offset;
3794 error = xlog_valid_rec_header(log, rhead, tail_blk);
3797 h_size = be32_to_cpu(rhead->h_size);
3798 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
3799 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
3800 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
3801 if (h_size % XLOG_HEADER_CYCLE_SIZE)
3804 hbp = xlog_get_bp(log, hblks);
3809 ASSERT(log->l_sectBBsize == 1);
3811 hbp = xlog_get_bp(log, 1);
3812 h_size = XLOG_BIG_RECORD_BSIZE;
3817 dbp = xlog_get_bp(log, BTOBB(h_size));
3823 memset(rhash, 0, sizeof(rhash));
3824 if (tail_blk <= head_blk) {
3825 for (blk_no = tail_blk; blk_no < head_blk; ) {
3826 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3830 rhead = (xlog_rec_header_t *)offset;
3831 error = xlog_valid_rec_header(log, rhead, blk_no);
3835 /* blocks in data section */
3836 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3837 error = xlog_bread(log, blk_no + hblks, bblks, dbp,
3842 error = xlog_unpack_data(rhead, offset, log);
3846 error = xlog_recover_process_data(log,
3847 rhash, rhead, offset, pass);
3850 blk_no += bblks + hblks;
3854 * Perform recovery around the end of the physical log.
3855 * When the head is not on the same cycle number as the tail,
3856 * we can't do a sequential recovery as above.
3859 while (blk_no < log->l_logBBsize) {
3861 * Check for header wrapping around physical end-of-log
3863 offset = hbp->b_addr;
3866 if (blk_no + hblks <= log->l_logBBsize) {
3867 /* Read header in one read */
3868 error = xlog_bread(log, blk_no, hblks, hbp,
3873 /* This LR is split across physical log end */
3874 if (blk_no != log->l_logBBsize) {
3875 /* some data before physical log end */
3876 ASSERT(blk_no <= INT_MAX);
3877 split_hblks = log->l_logBBsize - (int)blk_no;
3878 ASSERT(split_hblks > 0);
3879 error = xlog_bread(log, blk_no,
3887 * Note: this black magic still works with
3888 * large sector sizes (non-512) only because:
3889 * - we increased the buffer size originally
3890 * by 1 sector giving us enough extra space
3891 * for the second read;
3892 * - the log start is guaranteed to be sector aligned;
3894 * - we read the log end (LR header start)
3895 * _first_, then the log start (LR header end)
3896 * - order is important.
3898 wrapped_hblks = hblks - split_hblks;
3899 error = xlog_bread_offset(log, 0,
3901 offset + BBTOB(split_hblks));
3905 rhead = (xlog_rec_header_t *)offset;
3906 error = xlog_valid_rec_header(log, rhead,
3907 split_hblks ? blk_no : 0);
3911 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3914 /* Read in data for log record */
3915 if (blk_no + bblks <= log->l_logBBsize) {
3916 error = xlog_bread(log, blk_no, bblks, dbp,
3921 /* This log record is split across the
3922 * physical end of log */
3923 offset = dbp->b_addr;
3925 if (blk_no != log->l_logBBsize) {
3926 /* some data is before the physical end of log */
3928 ASSERT(!wrapped_hblks);
3929 ASSERT(blk_no <= INT_MAX);
3931 log->l_logBBsize - (int)blk_no;
3932 ASSERT(split_bblks > 0);
3933 error = xlog_bread(log, blk_no,
3941 * Note: this black magic still works with
3942 * large sector sizes (non-512) only because:
3943 * - we increased the buffer size originally
3944 * by 1 sector giving us enough extra space
3945 * for the second read;
3946 * - the log start is guaranteed to be sector aligned;
3948 * - we read the log end (LR header start)
3949 * _first_, then the log start (LR header end)
3950 * - order is important.
3952 error = xlog_bread_offset(log, 0,
3953 bblks - split_bblks, dbp,
3954 offset + BBTOB(split_bblks));
3959 error = xlog_unpack_data(rhead, offset, log);
3963 error = xlog_recover_process_data(log, rhash,
3964 rhead, offset, pass);
3970 ASSERT(blk_no >= log->l_logBBsize);
3971 blk_no -= log->l_logBBsize;
3973 /* read first part of physical log */
3974 while (blk_no < head_blk) {
3975 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3979 rhead = (xlog_rec_header_t *)offset;
3980 error = xlog_valid_rec_header(log, rhead, blk_no);
3984 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3985 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3990 error = xlog_unpack_data(rhead, offset, log);
3994 error = xlog_recover_process_data(log, rhash,
3995 rhead, offset, pass);
3998 blk_no += bblks + hblks;
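
/*
 * A standalone sketch of the wrap handling above, with a stubbed read
 * primitive: a record straddling the physical end of the log becomes a
 * split read over a circular region -- the tail piece first, then the
 * remainder from block zero into the same buffer, exactly the ordering
 * the "black magic" comments rely on.
 */
#include <stdint.h>
#include <stddef.h>

#define BB_SIZE	512

/* assumed primitive: read 'nbb' basic blocks at 'blk' into 'buf' */
static int log_read(uint64_t blk, int nbb, char *buf)
{
	(void)blk; (void)nbb; (void)buf;	/* stub for illustration */
	return 0;
}

static int log_read_wrapped(uint64_t blk, int nbb, char *buf,
			    uint64_t log_size_bb)
{
	int first;

	if (blk + (uint64_t)nbb <= log_size_bb)
		return log_read(blk, nbb, buf);		/* no wrap */

	first = (int)(log_size_bb - blk);		/* blocks before the end */
	if (log_read(blk, first, buf))
		return -1;
	/* the rest comes from the physical start of the log */
	return log_read(0, nbb - first, buf + (size_t)first * BB_SIZE);
}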
4010 * Do the recovery of the log. We actually do this in two phases.
4011 * The two passes are necessary in order to implement the function
4012 * of cancelling a record written into the log. The first pass
4013 * determines those things which have been cancelled, and the
4014 * second pass replays log items normally except for those which
4015 * have been cancelled. The handling of the replay and cancellations
4016 * takes place in the log item type specific routines.
4018 * The table of items which have cancel records in the log is allocated
4019 * and freed at this level, since only here do we know when all of
4020 * the log recovery has been completed.
4023 xlog_do_log_recovery(
4025 xfs_daddr_t head_blk,
4026 xfs_daddr_t tail_blk)
4030 ASSERT(head_blk != tail_blk);
4033 * First do a pass to find all of the cancelled buf log items.
4034 * Store them in the buf_cancel_table for use in the second pass.
4036 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
4037 sizeof(struct list_head),
4039 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4040 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
4042 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4043 XLOG_RECOVER_PASS1);
4045 kmem_free(log->l_buf_cancel_table);
4046 log->l_buf_cancel_table = NULL;
4050 * Then do a second pass to actually recover the items in the log.
4051 * When it is complete free the table of buf cancel items.
4053 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4054 XLOG_RECOVER_PASS2);
4059 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4060 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
4064 kmem_free(log->l_buf_cancel_table);
4065 log->l_buf_cancel_table = NULL;
4071 * Do the actual recovery
4076 xfs_daddr_t head_blk,
4077 xfs_daddr_t tail_blk)
4084 * First replay the images in the log.
4086 error = xlog_do_log_recovery(log, head_blk, tail_blk);
4091 * If IO errors happened during recovery, bail out.
4093 if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
4098 * We now update the tail_lsn since much of the recovery has completed
4099 * and there may be space available to use. If there were no extent frees
4100 * or iunlinks, we can free up the entire log and set the tail_lsn to
4101 * be the last_sync_lsn. This was set in xlog_find_tail to be the
4102 * lsn of the last known good LR on disk. If there are extent frees
4103 * or iunlinks they will have some entries in the AIL; so we look at
4104 * the AIL to determine how to set the tail_lsn.
4106 xlog_assign_tail_lsn(log->l_mp);
4109 * Now that we've finished replaying all buffer and inode
4110 * updates, re-read in the superblock and reverify it.
4112 bp = xfs_getsb(log->l_mp, 0);
4114 ASSERT(!(XFS_BUF_ISWRITE(bp)));
4116 XFS_BUF_UNASYNC(bp);
4117 bp->b_ops = &xfs_sb_buf_ops;
4118 xfsbdstrat(log->l_mp, bp);
4119 error = xfs_buf_iowait(bp);
4121 xfs_buf_ioerror_alert(bp, __func__);
4127 /* Convert superblock from on-disk format */
4128 sbp = &log->l_mp->m_sb;
4129 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
4130 ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
4131 ASSERT(xfs_sb_good_version(sbp));
4134 /* We've re-read the superblock so re-initialize per-cpu counters */
4135 xfs_icsb_reinit_counters(log->l_mp);
4137 xlog_recover_check_summary(log);
4139 /* Normal transactions can now occur */
4140 log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
4145 * Perform recovery and re-initialize some log variables in xlog_find_tail.
4147 * Return error or zero.
4153 xfs_daddr_t head_blk, tail_blk;
4156 /* find the tail of the log */
4157 if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
4160 if (tail_blk != head_blk) {
4161 /* There used to be a comment here:
4163 * disallow recovery on read-only mounts. note -- mount
4164 * checks for ENOSPC and turns it into an intelligent error message.
4166 * ...but this is no longer true. Now, unless you specify
4167 * NORECOVERY (in which case this function would never be
4168 * called), we just go ahead and recover. We do this all
4169 * under the vfs layer, so we can get away with it unless
4170 * the device itself is read-only, in which case we fail.
4172 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
4177 * Version 5 superblock log feature mask validation. We know the
4178 * log is dirty so check if there are any unknown log features
4179 * in what we need to recover. If there are unknown features
4180 * (e.g. unsupported transactions), then simply reject the
4181 * attempt at recovery before touching anything.
4183 if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
4184 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
4185 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
4187 "Superblock has unknown incompatible log features (0x%x) enabled.\n"
4188 "The log cannot be fully and/or safely recovered by this kernel.\n"
4189 "Please recover the log on a kernel that supports the unknown features.",
4190 (log->l_mp->m_sb.sb_features_log_incompat &
4191 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
4195 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
4196 log->l_mp->m_logname ? log->l_mp->m_logname
4199 error = xlog_do_recover(log, head_blk, tail_blk);
4200 log->l_flags |= XLOG_RECOVERY_NEEDED;
4206 * In the first part of recovery we replay inodes and buffers and build
4207 * up the list of extent free items which need to be processed. Here
4208 * we process the extent free items and clean up the on disk unlinked
4209 * inode lists. This is separated from the first part of recovery so
4210 * that the root and real-time bitmap inodes can be read in from disk in
4211 * between the two stages. This is necessary so that we can free space
4212 * in the real-time portion of the file system.
4215 xlog_recover_finish(
4219 * Now we're ready to do the transactions needed for the
4220 * rest of recovery. Start with completing all the extent
4221 * free intent records and then process the unlinked inode
4222 * lists. At this point, we essentially run in normal mode
4223 * except that we're still performing recovery actions
4224 * rather than accepting new requests.
4226 if (log->l_flags & XLOG_RECOVERY_NEEDED) {
4228 error = xlog_recover_process_efis(log);
4230 xfs_alert(log->l_mp, "Failed to recover EFIs");
4234 * Sync the log to get all the EFIs out of the AIL.
4235 * This isn't absolutely necessary, but it helps in
4236 * case the unlink transactions would have problems
4237 * pushing the EFIs out of the way.
4239 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
4241 xlog_recover_process_iunlinks(log);
4243 xlog_recover_check_summary(log);
4245 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
4246 log->l_mp->m_logname ? log->l_mp->m_logname
4248 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
4250 xfs_info(log->l_mp, "Ending clean mount");
4258 * Read all of the AGF and AGI counters and check that they
4259 * are consistent with the superblock counters.
4262 xlog_recover_check_summary(
4269 xfs_agnumber_t agno;
4270 __uint64_t freeblks;
4280 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
4281 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
4283 xfs_alert(mp, "%s agf read failed agno %d error %d",
4284 __func__, agno, error);
4286 agfp = XFS_BUF_TO_AGF(agfbp);
4287 freeblks += be32_to_cpu(agfp->agf_freeblks) +
4288 be32_to_cpu(agfp->agf_flcount);
4289 xfs_buf_relse(agfbp);
4292 error = xfs_read_agi(mp, NULL, agno, &agibp);
4294 xfs_alert(mp, "%s agi read failed agno %d error %d",
4295 __func__, agno, error);
4297 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
4299 itotal += be32_to_cpu(agi->agi_count);
4300 ifree += be32_to_cpu(agi->agi_freecount);
4301 xfs_buf_relse(agibp);