/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"

kmem_zone_t	*xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);

static inline int
xfs_buf_log_format_size(
	struct xfs_buf_log_format *blfp)
{
	return offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}
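
/*
 * Brief worked example (added, not in the original source): with a
 * blf_map_size of 2, the on-disk format structure occupies
 *
 *	offsetof(struct xfs_buf_log_format, blf_data_map) +
 *		2 * sizeof(blfp->blf_data_map[0])
 *
 * bytes, so only the bitmap words actually in use are written to the
 * log rather than the full in-memory structure.
 */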

/*
 * This returns the number of log iovecs needed to log the given buf
 * log item.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_buf_log_format *blfp,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf		*bp = bip->bli_buf;
	int			next_bit;
	int			last_bit;

	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return;

	/*
	 * initial count for a dirty buffer is 2 vectors - the format structure
	 * and the first dirty region.
	 */
	*nvecs += 2;
	*nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			(*nvecs)++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			(*nvecs)++;
		} else {
			last_bit++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}
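
/*
 * Illustrative example (added, not in the original source): if chunks
 * 0, 1, 5 and 6 of a contiguous buffer segment are dirty, the bitmap
 * walk above finds two discontiguous dirty regions ({0,1} and {5,6}),
 * so the segment needs 3 iovecs (the format structure plus one per
 * region) and accounts 4 * XFS_BLF_CHUNK bytes of data.
 */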

/*
 * This returns the number of log iovecs needed to log the given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like would occur if multiple buffers
 * were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it.
		 * It is not being included in the transaction
		 * commit, so no vectors are used at all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * The vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
					  nvecs, nbytes);
	}
	trace_xfs_buf_item_size(bip);
}
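
/*
 * Added note (not in the original source): XFS_LOG_VEC_ORDERED is a
 * negative sentinel rather than a real vector count; the CIL code
 * checks *nvecs for it to treat the item as ordered, i.e. committed
 * without any logged data regions.
 */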

static struct xfs_log_iovec *
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_iovec	*vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf	*bp = bip->bli_buf;
	uint		base_size;
	uint		nvecs;
	int		first_bit;
	int		last_bit;
	int		next_bit;
	uint		nbits;
	uint		buffer_offset;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	nvecs = 0;
	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty in the transaction, mark
		 * the size as zero and do not advance the vector pointer.
		 */
		goto out;
	}

	vecp->i_addr = blfp;
	vecp->i_len = base_size;
	vecp->i_type = XLOG_REG_TYPE_BFORMAT;
	vecp++;
	nvecs = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		goto out;
	}

	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get
		 * out of the loop.
		 * Else if we start a new set of bits then fill in the
		 * iovec for the series we were looking at and start
		 * counting the bits in the new one.
		 * Else we're still in the same set of bits so just
		 * keep counting and scanning.
		 */
		if (next_bit == -1) {
			buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			break;
		} else if (next_bit != last_bit + 1) {
			buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else if (xfs_buf_offset(bp, offset +
					  (next_bit << XFS_BLF_SHIFT)) !=
			   (xfs_buf_offset(bp, offset +
					   (last_bit << XFS_BLF_SHIFT)) +
			    XFS_BLF_CHUNK)) {
			buffer_offset = offset + first_bit * XFS_BLF_CHUNK;
			vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
			vecp->i_len = nbits * XFS_BLF_CHUNK;
			vecp->i_type = XLOG_REG_TYPE_BCHUNK;
			nvecs++;
			vecp++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
out:
	blfp->blf_size = nvecs;
	return vecp;
}
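
/*
 * Added illustration (not in the original source): for a segment with
 * two discontiguous dirty regions the iovec run built above is
 *
 *	[XLOG_REG_TYPE_BFORMAT][XLOG_REG_TYPE_BCHUNK][XLOG_REG_TYPE_BCHUNK]
 *
 * and blf_size records the total (3 here), which is how log recovery
 * knows how many regions belong to this format header.
 */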

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_iovec	*vecp)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 *
	 * For icreate item based inode allocation, the buffers aren't written
	 * to the journal during allocation, and hence we should always tag the
	 * buffer as an inode buffer so that the correct unlinked list replay
	 * occurs during recovery.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	if ((bip->bli_flags & (XFS_BLI_ORDERED|XFS_BLI_STALE)) ==
							XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it.  It is not being
		 * included in the transaction commit, so don't format it.
		 */
		trace_xfs_buf_item_format_ordered(bip);
		return;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		vecp = xfs_buf_item_format_segment(bip, vecp, offset,
						&bip->bli_formats[i]);
		offset += bp->b_maps[i].bm_len;
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}
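
/*
 * Added note (not in the original source): the elevated b_pin_count is
 * what xfs_buf_ispinned() checks, so a pinned buffer cannot complete
 * writeback until the matching xfs_buf_item_unpin() below drops the
 * count and wakes any waiters.
 */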

/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t		*bp = bip->bli_buf;
	struct xfs_ail		*ailp = lip->li_ailp;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_fspriv == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL.  xfs_trans_ail_delete()
		 * will take care of that situation.  xfs_trans_ail_delete()
		 * drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			bp->b_fspriv = NULL;
			bp->b_iodone = NULL;
		} else {
			spin_lock(&ailp->xa_lock);
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_fspriv == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * There are currently two references to the buffer - the active
		 * LRU reference and the buf log item. What we are about to do
		 * here - simulate a failed IO completion - requires 3
		 * references.
		 *
		 * The LRU reference is removed by the xfs_buf_stale() call. The
		 * buf item reference is removed by the xfs_buf_iodone()
		 * callback that is run by xfs_buf_do_callbacks() during ioend
		 * processing (via the bp->b_iodone callback), and then finally
		 * the ioend processing will drop the IO reference if the buffer
		 * is marked XBF_ASYNC.
		 *
		 * Hence we need to take an additional reference here so that IO
		 * completion processing doesn't free the buffer prematurely.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioerror(bp, EIO);
		XFS_BUF_UNDONE(bp);
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp, 0);
	}
}

/*
 * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
 * seconds so as to not spam logs too much on repeated detection of the same
 * buffer being bad.
 */
DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
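
/*
 * Added note (not in the original source): DEFINE_RATELIMIT_STATE(name,
 * interval, burst) comes from <linux/ratelimit.h>; with an interval of
 * 30 * HZ jiffies and a burst of 10, the ___ratelimit() check below
 * passes for at most 10 warnings in any 30 second window.
 */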

STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone else
		 * issues a log force to unpin the stale buffer.  Check for the
		 * race condition here so xfsaild recognizes the buffer is pinned
		 * and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if ((bp->b_flags & XBF_WRITE_FAIL) &&
	    ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
		xfs_warn(bp->b_target->bt_mount,
"Detected failing async write on buffer block 0x%llx. Retrying async write.",
			 (long long)bp->b_bn);
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}
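
/*
 * Added note (not in the original source): xfsaild interprets the
 * return value - XFS_ITEM_PINNED makes it request a log force to
 * unpin the item, XFS_ITEM_LOCKED means skip it for now, and
 * XFS_ITEM_FLUSHING means writeback has already been queued - so the
 * checks above directly drive AIL pushing behaviour.
 */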

/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			aborted;
	bool			clean;
	int			flags;
	int			i;

	/* Clear the buffer's association with this transaction. */
	bp->b_transp = NULL;

	/*
	 * If this is a transaction abort, don't return early.  Instead, allow
	 * the brelse to happen.  Normally it would be done for stale
	 * (cancelled) buffers at unpin time, but we'll never go through the
	 * pin/unpin cycle if we abort inside commit.
	 */
	aborted = (lip->li_flags & XFS_LI_ABORTED) ? true : false;

	/*
	 * Before possibly freeing the buf item, copy the per-transaction state
	 * so we can reference it safely later after clearing it from the
	 * buffer log item.
	 */
	flags = bip->bli_flags;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * If the buf item is marked stale, then don't do anything.  We'll
	 * unlock the buffer and free the buf item when the buffer is unpinned
	 * for the last time.
	 */
	if (flags & XFS_BLI_STALE) {
		trace_xfs_buf_item_unlock_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		if (!aborted) {
			atomic_dec(&bip->bli_refcount);
			return;
		}
	}

	trace_xfs_buf_item_unlock(bip);

	/*
	 * If the buf item isn't tracking any data, free it, otherwise drop the
	 * reference we hold to it. If we are aborting the transaction, this may
	 * be the only reference to the buf item, so we free it anyway
	 * regardless of whether it is dirty or not. A dirty abort implies a
	 * shutdown, anyway.
	 *
	 * Ordered buffers are dirty but may have no recorded changes, so ensure
	 * we only release clean items here.
	 */
	clean = (flags & XFS_BLI_DIRTY) ? false : true;
	if (clean) {
		for (i = 0; i < bip->bli_format_count; i++) {
			if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
				     bip->bli_formats[i].blf_map_size)) {
				clean = false;
				break;
			}
		}
	}

	/*
	 * Clean buffers, by definition, cannot be in the AIL. However, aborted
	 * buffers may be dirty and hence in the AIL. Therefore if we are
	 * aborting a buffer and we've just taken the last reference away, we
	 * have to check if it is in the AIL before freeing it. We need to free
	 * it in this case, because an aborted transaction has already shut the
	 * filesystem down and this is the last chance we will have to do so.
	 */
	if (atomic_dec_and_test(&bip->bli_refcount)) {
		if (clean)
			xfs_buf_item_relse(bp);
		else if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
			if (lip->li_flags & XFS_LI_IN_AIL) {
				spin_lock(&lip->li_ailp->xa_lock);
				xfs_trans_ail_delete(lip->li_ailp, lip,
						     SHUTDOWN_LOG_IO_ERROR);
			}
			xfs_buf_item_relse(bp);
		}
	}

	if (!(flags & XFS_BLI_HOLD))
		xfs_buf_relse(bp);
}
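
/*
 * Added usage note (not in the original source): XFS_BLI_HOLD is set
 * by xfs_trans_bhold(tp, bp), which lets a caller keep the buffer
 * locked across transaction commit; the final check above then skips
 * the xfs_buf_relse() so the holder can keep using the buffer.
 */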

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}

/*
 * This is the ops vector shared by all buf log items.
 */
static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_unlock	= xfs_buf_item_unlock,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
	.iop_committing	= xfs_buf_item_committing
};

STATIC int
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{
	ASSERT(bip->bli_formats == NULL);
	bip->bli_format_count = count;

	if (count == 1) {
		bip->bli_formats = &bip->__bli_format;
		return 0;
	}

	bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
				KM_SLEEP);
	if (!bip->bli_formats)
		return ENOMEM;
	return 0;
}

STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{
	if (bip->bli_formats != &bip->__bli_format) {
		kmem_free(bip->bli_formats);
		bip->bli_formats = NULL;
	}
}

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_fsprivate field to point to the new
 * buf log item.  If there are other items attached to the
 * buffer (see xfs_buf_attach_iodone() below), then put the
 * buf log item at the front.
 */
void
xfs_buf_item_init(
	xfs_buf_t	*bp,
	xfs_mount_t	*mp)
{
	xfs_log_item_t		*lip = bp->b_fspriv;
	xfs_buf_log_item_t	*bip;
	int			chunks;
	int			map_size;
	int			error;
	int			i;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer.  If there is, it is guaranteed to be
	 * the first.  If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_target->bt_mount == mp);
	if (lip != NULL && lip->li_type == XFS_LI_BUF)
		return;

	bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;
	xfs_buf_hold(bp);

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
	 * can be divided into. Make sure not to truncate any pieces.
	 * map_size is the size of the bitmap needed to describe the
	 * chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer. This makes the implementation as simple as possible.
	 */
	error = xfs_buf_item_get_format(bip, bp->b_map_count);
	ASSERT(error == 0);

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}
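
	/*
	 * Added example (not from the original source): for a
	 * single-segment 4096 byte buffer, with XFS_BLF_CHUNK == 128
	 * and NBWORD == 32 this gives chunks = 32 and map_size = 1,
	 * i.e. one bitmap word tracks the whole buffer at 128 byte
	 * granularity.
	 */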

#ifdef XFS_TRANS_DEBUG
	/*
	 * Allocate the arrays for tracking what needs to be logged
	 * and what our callers request to be logged.  bli_orig
	 * holds a copy of the original, clean buffer for comparison
	 * against, and bli_logged keeps a 1 bit flag per byte in
	 * the buffer to indicate which bytes the callers have asked
	 * to have logged.
	 */
	bip->bli_orig = kmem_alloc(BBTOB(bp->b_length), KM_SLEEP);
	memcpy(bip->bli_orig, bp->b_addr, BBTOB(bp->b_length));
	bip->bli_logged = kmem_zalloc(BBTOB(bp->b_length) / NBBY, KM_SLEEP);
#endif

	/*
	 * Put the buf item into the list of items attached to the
	 * buffer at the front.
	 */
	if (bp->b_fspriv)
		bip->bli_item.li_bio_list = bp->b_fspriv;
	bp->b_fspriv = bip;
}

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
static void
xfs_buf_item_log_segment(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last,
	uint			*map)
{
	uint		first_bit;
	uint		last_bit;
	uint		bits_to_set;
	uint		bits_set;
	uint		word_num;
	uint		*wordp;
	uint		bit;
	uint		end_bit;
	uint		mask;

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
		mask = ((1 << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp |= 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1 << end_bit) - 1;
		*wordp |= mask;
	}
}
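
/*
 * Added worked example (not from the original source): logging bytes
 * 256 through 511 of a segment gives first_bit = 256 >> XFS_BLF_SHIFT
 * = 2 and last_bit = 511 >> XFS_BLF_SHIFT = 3 (128 byte chunks), so
 * bits_to_set = 2 and both bits land in the first bitmap word.
 */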

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	xfs_buf_log_item_t	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len);
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}
		if (first < start)
			first = start;
		if (end > last)
			end = last;

		xfs_buf_item_log_segment(bip, first, end,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += bp->b_maps[i].bm_len;
	}
}

/*
 * Return 1 if the buffer has been logged or ordered in a transaction (at any
 * point, not just the current transaction) and 0 if not.
 */
uint
xfs_buf_item_dirty(
	xfs_buf_log_item_t	*bip)
{
	return (bip->bli_flags & XFS_BLI_DIRTY);
}

STATIC void
xfs_buf_item_free(
	xfs_buf_log_item_t	*bip)
{
#ifdef XFS_TRANS_DEBUG
	kmem_free(bip->bli_orig);
	kmem_free(bip->bli_logged);
#endif /* XFS_TRANS_DEBUG */

	xfs_buf_item_free_format(bip);
	kmem_zone_free(xfs_buf_item_zone, bip);
}

/*
 * This is called when the buf log item is no longer needed.  It should
 * free the buf log item associated with the given buffer and clear
 * the buffer's pointer to the buf log item.  If there are no more
 * items in the list, clear the b_iodone field of the buffer (see
 * xfs_buf_attach_iodone() below).
 */
void
xfs_buf_item_relse(
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));

	bp->b_fspriv = bip->bli_item.li_bio_list;
	if (bp->b_fspriv == NULL)
		bp->b_iodone = NULL;

	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}

/*
 * Add the given log item with its callback to the list of callbacks
 * to be called when the buffer's I/O completes.  If it is not set
 * already, set the buffer's b_iodone() routine to be
 * xfs_buf_iodone_callbacks() and link the log item into the list of
 * items rooted at b_fsprivate.  Items are always added as the second
 * entry in the list if there is a first, because the buf item code
 * assumes that the buf log item is first.
 */
void
xfs_buf_attach_iodone(
	xfs_buf_t	*bp,
	void		(*cb)(xfs_buf_t *, xfs_log_item_t *),
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*head_lip;

	ASSERT(xfs_buf_islocked(bp));

	lip->li_cb = cb;
	head_lip = bp->b_fspriv;
	if (head_lip) {
		lip->li_bio_list = head_lip->li_bio_list;
		head_lip->li_bio_list = lip;
	} else {
		bp->b_fspriv = lip;
	}

	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);
	bp->b_iodone = xfs_buf_iodone_callbacks;
}
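
/*
 * Added usage note (not in the original source): inode flushing is a
 * typical caller. xfs_iflush() attaches xfs_iflush_done() this way so
 * the inode log item can be removed from the AIL once the backing
 * buffer is written back:
 *
 *	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
 */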

/*
 * We can have many callbacks on a buffer.  Running the callbacks individually
 * can cause a lot of contention on the AIL lock, so we allow for a single
 * callback to be able to scan the remaining lip->li_bio_list for other items
 * of the same type and callback to be processed in the first call.
 *
 * As a result, the loop walking the callback list below will also modify the
 * list.  It removes the first item from the list and then runs the callback.
 * The loop then restarts from the new head of the list.  This allows the
 * callback to scan and modify the list attached to the buffer and we don't
 * have to care about maintaining a next item pointer.
 */
STATIC void
xfs_buf_do_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;

	while ((lip = bp->b_fspriv) != NULL) {
		bp->b_fspriv = lip->li_bio_list;
		ASSERT(lip->li_cb != NULL);
		/*
		 * Clear the next pointer so we don't have any
		 * confusion if the item is added to another buf.
		 * Don't touch the log item after calling its
		 * callback, because it could have freed itself.
		 */
		lip->li_bio_list = NULL;
		lip->li_cb(bp, lip);
	}
}

/*
 * This is the iodone() function for buffers which have had callbacks
 * attached to them by xfs_buf_attach_iodone().  It should remove each
 * log item from the buffer's list and call the callback of each in turn.
 * When done, the buffer's fsprivate field is set to NULL and the buffer
 * is unlocked with a call to iodone().
 */
void
xfs_buf_iodone_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip = bp->b_fspriv;
	struct xfs_mount	*mp = lip->li_mountp;
	static ulong		lasttime;
	static xfs_buftarg_t	*lasttarg;

	if (likely(!xfs_buf_geterror(bp)))
		goto do_callbacks;

	/*
	 * If we've already decided to shutdown the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_stale(bp);
		XFS_BUF_DONE(bp);
		trace_xfs_buf_item_iodone(bp, _RET_IP_);
		goto do_callbacks;
	}

	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __func__);
	}
	lasttarg = bp->b_target;

	/*
	 * If the write was asynchronous then no one will be looking for the
	 * error.  Clear the error state and write the buffer out again.
	 *
	 * XXX: This helps against transient write errors, but we need to find
	 * a way to shut the filesystem down if the writes keep failing.
	 *
	 * In practice we'll shut the filesystem down soon as non-transient
	 * errors tend to affect the whole device and a failing log write
	 * will make us give up.  But we really ought to do better here.
	 */
	if (XFS_BUF_ISASYNC(bp)) {
		ASSERT(bp->b_iodone != NULL);

		trace_xfs_buf_item_iodone_async(bp, _RET_IP_);

		xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */

		if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) {
			bp->b_flags |= XBF_WRITE | XBF_ASYNC |
				       XBF_DONE | XBF_WRITE_FAIL;
			xfs_buf_iorequest(bp);
		} else {
			xfs_buf_relse(bp);
		}

		return;
	}

	/*
	 * If the write of the buffer was synchronous, we want to make
	 * sure to return the error to the caller of xfs_bwrite().
	 */
	xfs_buf_stale(bp);
	XFS_BUF_DONE(bp);

	trace_xfs_buf_error_relse(bp, _RET_IP_);

do_callbacks:
	xfs_buf_do_callbacks(bp);
	bp->b_fspriv = NULL;
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp, 0);
}

/*
 * This is the iodone() function for buffers which have been
 * logged.  It is called when they are eventually flushed out.
 * It should remove the buf item from the AIL, and free the buf item.
 * It is called by xfs_buf_iodone_callbacks() above which will take
 * care of cleaning up the buffer itself.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail		*ailp = lip->li_ailp;

	ASSERT(BUF_ITEM(lip)->bli_buf == bp);

	xfs_buf_rele(bp);

	/*
	 * If we are forcibly shutting down, this may well be
	 * off the AIL already.  That's because we simulate the
	 * log-committed callbacks to unpin these buffers.  Or we may never
	 * have put this item on AIL because the transaction was aborted
	 * forcibly.  xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, AIL is useless if we're forcing a shutdown.
	 */
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_free(BUF_ITEM(lip));
}