2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 #include "xfs_shared.h"
20 #include "xfs_format.h"
21 #include "xfs_log_format.h"
22 #include "xfs_trans_resv.h"
23 #include "xfs_mount.h"
24 #include "xfs_inode.h"
25 #include "xfs_trans.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_alloc.h"
28 #include "xfs_error.h"
29 #include "xfs_iomap.h"
30 #include "xfs_trace.h"
32 #include "xfs_bmap_util.h"
33 #include "xfs_bmap_btree.h"
34 #include <linux/gfp.h>
35 #include <linux/mpage.h>
36 #include <linux/pagevec.h>
37 #include <linux/writeback.h>
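/*
 * xfs_count_page_state(): scan the buffer heads attached to the page and
 * flag whether any of them are in the delalloc or unwritten state.
 */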
45 struct buffer_head *bh, *head;
47 *delalloc = *unwritten = 0;
49 bh = head = page_buffers(page);
51 if (buffer_unwritten(bh))
53 else if (buffer_delay(bh))
55 } while ((bh = bh->b_this_page) != head);
58 STATIC struct block_device *
59 xfs_find_bdev_for_inode(
62 struct xfs_inode *ip = XFS_I(inode);
63 struct xfs_mount *mp = ip->i_mount;
65 if (XFS_IS_REALTIME_INODE(ip))
66 return mp->m_rtdev_targp->bt_bdev;
68 return mp->m_ddev_targp->bt_bdev;
72 * We're now finished for good with this ioend structure.
73 * Update the page state via the associated buffer_heads,
74 * release holds on the inode and bio, and finally free
75 * up memory. Do not use the ioend after this.
81 struct buffer_head *bh, *next;
83 for (bh = ioend->io_buffer_head; bh; bh = next) {
85 bh->b_end_io(bh, !ioend->io_error);
88 mempool_free(ioend, xfs_ioend_pool);
92 * Fast and loose check if this write could update the on-disk inode size.
94 static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
96 return ioend->io_offset + ioend->io_size >
97 XFS_I(ioend->io_inode)->i_d.di_size;
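/*
 * Allocate and reserve a transaction up front for updating the on-disk file
 * size at IO completion. The transaction, along with the freeze protection it
 * holds, is handed over to the completion worker via ioend->io_append_trans.
 */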
101 xfs_setfilesize_trans_alloc(
102 struct xfs_ioend *ioend)
104 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
105 struct xfs_trans *tp;
108 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
110 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
112 xfs_trans_cancel(tp);
116 ioend->io_append_trans = tp;
119 * We may pass freeze protection with a transaction. So tell lockdep
122 __sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
124 * We hand off the transaction to the completion thread now, so
125 * clear the flag here.
127 current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
132 * Update on-disk file size now that data has been written to disk.
136 struct xfs_inode *ip,
137 struct xfs_trans *tp,
143 xfs_ilock(ip, XFS_ILOCK_EXCL);
144 isize = xfs_new_eof(ip, offset + size);
146 xfs_iunlock(ip, XFS_ILOCK_EXCL);
147 xfs_trans_cancel(tp);
151 trace_xfs_setfilesize(ip, offset, size);
153 ip->i_d.di_size = isize;
154 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
155 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
157 return xfs_trans_commit(tp);
161 xfs_setfilesize_ioend(
162 struct xfs_ioend *ioend)
164 struct xfs_inode *ip = XFS_I(ioend->io_inode);
165 struct xfs_trans *tp = ioend->io_append_trans;
168 * The transaction may have been allocated in the I/O submission thread,
169 * thus we need to mark ourselves as being in a transaction manually.
170 * Similarly for freeze protection.
172 current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
173 __sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
175 return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
179 * Schedule IO completion handling on the final put of an ioend.
181 * If there is no work to do we might as well call it a day and free the
186 struct xfs_ioend *ioend)
188 if (atomic_dec_and_test(&ioend->io_remaining)) {
189 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
191 if (ioend->io_type == XFS_IO_UNWRITTEN)
192 queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
193 else if (ioend->io_append_trans)
194 queue_work(mp->m_data_workqueue, &ioend->io_work);
196 xfs_destroy_ioend(ioend);
201 * IO write completion.
205 struct work_struct *work)
207 xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work);
208 struct xfs_inode *ip = XFS_I(ioend->io_inode);
211 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
212 ioend->io_error = -EIO;
219 * For unwritten extents we need to issue transactions to convert a
220 * range to normal written extents after the data I/O has finished.
222 if (ioend->io_type == XFS_IO_UNWRITTEN) {
223 error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
225 } else if (ioend->io_append_trans) {
226 error = xfs_setfilesize_ioend(ioend);
228 ASSERT(!xfs_ioend_is_append(ioend));
233 ioend->io_error = error;
234 xfs_destroy_ioend(ioend);
238 * Allocate and initialise an IO completion structure.
239 * We need to track unwritten extent write completion here initially.
240 * We'll need to extend this for updating the ondisk inode size later
250 ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
253 * Set the count to 1 initially, which will prevent an I/O
254 * completion callback that happens before we have started
255 * all the I/O from calling the completion routine too early.
257 atomic_set(&ioend->io_remaining, 1);
259 ioend->io_list = NULL;
260 ioend->io_type = type;
261 ioend->io_inode = inode;
262 ioend->io_buffer_head = NULL;
263 ioend->io_buffer_tail = NULL;
264 ioend->io_offset = 0;
266 ioend->io_append_trans = NULL;
268 INIT_WORK(&ioend->io_work, xfs_end_io);
276 struct xfs_bmbt_irec *imap,
280 struct xfs_inode *ip = XFS_I(inode);
281 struct xfs_mount *mp = ip->i_mount;
282 ssize_t count = 1 << inode->i_blkbits;
283 xfs_fileoff_t offset_fsb, end_fsb;
285 int bmapi_flags = XFS_BMAPI_ENTIRE;
288 if (XFS_FORCED_SHUTDOWN(mp))
291 if (type == XFS_IO_UNWRITTEN)
292 bmapi_flags |= XFS_BMAPI_IGSTATE;
294 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
297 xfs_ilock(ip, XFS_ILOCK_SHARED);
300 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
301 (ip->i_df.if_flags & XFS_IFEXTENTS));
302 ASSERT(offset <= mp->m_super->s_maxbytes);
304 if (offset + count > mp->m_super->s_maxbytes)
305 count = mp->m_super->s_maxbytes - offset;
306 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
307 offset_fsb = XFS_B_TO_FSBT(mp, offset);
308 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
309 imap, &nimaps, bmapi_flags);
310 xfs_iunlock(ip, XFS_ILOCK_SHARED);
315 if (type == XFS_IO_DELALLOC &&
316 (!nimaps || isnullstartblock(imap->br_startblock))) {
317 error = xfs_iomap_write_allocate(ip, offset, imap);
319 trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
324 if (type == XFS_IO_UNWRITTEN) {
326 ASSERT(imap->br_startblock != HOLESTARTBLOCK);
327 ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
331 trace_xfs_map_blocks_found(ip, offset, count, type, imap);
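/*
 * xfs_imap_valid(): return true if the byte offset lies within the extent
 * described by @imap. The offset is converted from bytes to filesystem
 * blocks before the comparison.
 */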
338 struct xfs_bmbt_irec *imap,
341 offset >>= inode->i_blkbits;
343 return offset >= imap->br_startoff &&
344 offset < imap->br_startoff + imap->br_blockcount;
348 * BIO completion handler for buffered IO.
354 xfs_ioend_t *ioend = bio->bi_private;
356 if (!ioend->io_error)
357 ioend->io_error = bio->bi_error;
359 /* Toss bio and pass work off to an xfsdatad thread */
360 bio->bi_private = NULL;
361 bio->bi_end_io = NULL;
364 xfs_finish_ioend(ioend);
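/*
 * xfs_submit_ioend_bio(): take a reference on the ioend for the bio we are
 * about to submit, wire up the bio completion handler and issue it.
 * WRITE_SYNC is used for data integrity (WB_SYNC_ALL) writeback.
 */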
368 xfs_submit_ioend_bio(
369 struct writeback_control *wbc,
373 atomic_inc(&ioend->io_remaining);
374 bio->bi_private = ioend;
375 bio->bi_end_io = xfs_end_bio;
376 submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
381 struct buffer_head *bh)
383 struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
385 ASSERT(bio->bi_private == NULL);
386 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
387 bio->bi_bdev = bh->b_bdev;
392 xfs_start_buffer_writeback(
393 struct buffer_head *bh)
395 ASSERT(buffer_mapped(bh));
396 ASSERT(buffer_locked(bh));
397 ASSERT(!buffer_delay(bh));
398 ASSERT(!buffer_unwritten(bh));
400 mark_buffer_async_write(bh);
401 set_buffer_uptodate(bh);
402 clear_buffer_dirty(bh);
406 xfs_start_page_writeback(
411 ASSERT(PageLocked(page));
412 ASSERT(!PageWriteback(page));
415 * If the page was not fully cleaned, we need to ensure that the higher
416 * layers come back to it correctly. That means we need to keep the page
417 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
418 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
419 * write this page in this writeback sweep will be made.
422 clear_page_dirty_for_io(page);
423 set_page_writeback(page);
425 set_page_writeback_keepwrite(page);
429 /* If no buffers on the page are to be written, finish it here */
431 end_page_writeback(page);
434 static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
436 return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
440 * Submit all of the bios for all of the ioends we have saved up, covering the
441 * initial writepage page and also any probed pages.
443 * Because we may have multiple ioends spanning a page, we need to start
444 * writeback on all the buffers before we submit them for I/O. If we mark the
445 * buffers as we go, then we can end up with a page that only has some buffers
446 * marked async write, and I/O completion can occur before we mark the other
447 * buffers async write.
449 * The end result of this is that we trip a bug in end_page_writeback() because
450 * we call it twice for the one page as the code in end_buffer_async_write()
451 * assumes that all buffers on the page are started at the same time.
453 * The fix is two passes across the ioend list - one to start writeback on the
454 * buffer_heads, and then submit them for I/O on the second pass.
456 * If @fail is non-zero, it means that we have a situation where some part of
457 * the submission process has failed after we have marked pages for writeback
458 * and unlocked them. In this situation, we need to fail the ioend chain rather
459 * than submit it to IO. This typically only happens on a filesystem shutdown.
463 struct writeback_control *wbc,
467 xfs_ioend_t *head = ioend;
469 struct buffer_head *bh;
471 sector_t lastblock = 0;
473 /* Pass 1 - start writeback */
475 next = ioend->io_list;
476 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
477 xfs_start_buffer_writeback(bh);
478 } while ((ioend = next) != NULL);
480 /* Pass 2 - submit I/O */
483 next = ioend->io_list;
487 * If we are failing the IO now, just mark the ioend with an
488 * error and finish it. This will run IO completion immediately
489 * as there is only one reference to the ioend at this point in
493 ioend->io_error = fail;
494 xfs_finish_ioend(ioend);
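/*
 * Build bios from the buffers chained on this ioend via b_private. A new
 * bio is started for the first buffer, and the current bio is submitted
 * whenever the next buffer is not physically contiguous or no longer fits.
 */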
498 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
502 bio = xfs_alloc_ioend_bio(bh);
503 } else if (bh->b_blocknr != lastblock + 1) {
504 xfs_submit_ioend_bio(wbc, ioend, bio);
508 if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
509 xfs_submit_ioend_bio(wbc, ioend, bio);
513 lastblock = bh->b_blocknr;
516 xfs_submit_ioend_bio(wbc, ioend, bio);
517 xfs_finish_ioend(ioend);
518 } while ((ioend = next) != NULL);
522 * Cancel submission of all buffer_heads so far in this ioend.
523 * Toss the ioend too. Only ever called for the initial page
524 * in a writepage request, so only ever one page.
531 struct buffer_head *bh, *next_bh;
534 next = ioend->io_list;
535 bh = ioend->io_buffer_head;
537 next_bh = bh->b_private;
538 clear_buffer_async_write(bh);
540 * The unwritten flag is cleared when added to the
541 * ioend. We're not submitting for I/O so mark the
542 * buffer unwritten again for next time around.
544 if (ioend->io_type == XFS_IO_UNWRITTEN)
545 set_buffer_unwritten(bh);
547 } while ((bh = next_bh) != NULL);
549 mempool_free(ioend, xfs_ioend_pool);
550 } while ((ioend = next) != NULL);
554 * Test to see if we've been building up a completion structure for
555 * earlier buffers -- if so, we try to append to this ioend if we
556 * can, otherwise we finish off any current ioend and start another.
557 * Return true if we've finished the given ioend.
562 struct buffer_head *bh,
565 xfs_ioend_t **result,
568 xfs_ioend_t *ioend = *result;
570 if (!ioend || need_ioend || type != ioend->io_type) {
571 xfs_ioend_t *previous = *result;
573 ioend = xfs_alloc_ioend(inode, type);
574 ioend->io_offset = offset;
575 ioend->io_buffer_head = bh;
576 ioend->io_buffer_tail = bh;
578 previous->io_list = ioend;
581 ioend->io_buffer_tail->b_private = bh;
582 ioend->io_buffer_tail = bh;
585 bh->b_private = NULL;
586 ioend->io_size += bh->b_size;
592 struct buffer_head *bh,
593 struct xfs_bmbt_irec *imap,
597 struct xfs_mount *m = XFS_I(inode)->i_mount;
598 xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
599 xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);
601 ASSERT(imap->br_startblock != HOLESTARTBLOCK);
602 ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
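	/*
	 * iomap_bn is in 512-byte basic blocks; convert it to units of the
	 * inode's block size and add the block offset of @offset within the
	 * extent to get the block number for this buffer.
	 */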
604 bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
605 ((offset - iomap_offset) >> inode->i_blkbits);
607 ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));
610 set_buffer_mapped(bh);
616 struct buffer_head *bh,
617 struct xfs_bmbt_irec *imap,
620 ASSERT(imap->br_startblock != HOLESTARTBLOCK);
621 ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
623 xfs_map_buffer(inode, bh, imap, offset);
624 set_buffer_mapped(bh);
625 clear_buffer_delay(bh);
626 clear_buffer_unwritten(bh);
630 * Test if a given page contains at least one buffer of a given @type.
631 * If @check_all_buffers is true, then we walk all the buffers in the page to
632 * try to find one of the type passed in. If it is not set, then the caller only
633 * needs to check the first buffer on the page for a match.
639 bool check_all_buffers)
641 struct buffer_head *bh;
642 struct buffer_head *head;
644 if (PageWriteback(page))
648 if (!page_has_buffers(page))
651 bh = head = page_buffers(page);
653 if (buffer_unwritten(bh)) {
654 if (type == XFS_IO_UNWRITTEN)
656 } else if (buffer_delay(bh)) {
657 if (type == XFS_IO_DELALLOC)
659 } else if (buffer_dirty(bh) && buffer_mapped(bh)) {
660 if (type == XFS_IO_OVERWRITE)
664 /* If we are only checking the first buffer, we are done now. */
665 if (!check_all_buffers)
667 } while ((bh = bh->b_this_page) != head);
673 * Allocate & map buffers for page given the extent map. Write it out.
674 * Except for the original page of a writepage, this is called on
675 * delalloc/unwritten pages only; for the original page it is possible
676 * that the page has no mapping at all.
683 struct xfs_bmbt_irec *imap,
684 xfs_ioend_t **ioendp,
685 struct writeback_control *wbc)
687 struct buffer_head *bh, *head;
688 xfs_off_t end_offset;
689 unsigned long p_offset;
692 int count = 0, done = 0, uptodate = 1;
693 xfs_off_t offset = page_offset(page);
695 if (page->index != tindex)
697 if (!trylock_page(page))
699 if (PageWriteback(page))
700 goto fail_unlock_page;
701 if (page->mapping != inode->i_mapping)
702 goto fail_unlock_page;
703 if (!xfs_check_page_type(page, (*ioendp)->io_type, false))
704 goto fail_unlock_page;
707 * page_dirty is initially a count of buffers on the page before
708 * EOF and is decremented as we move each into a cleanable state.
712 * End offset is the highest offset that this page should represent.
713 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
714 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
715 * hence give us the correct page_dirty count. On any other page,
716 * it will be zero and in that case we need page_dirty to be the
717 * count of buffers on the page.
719 end_offset = min_t(unsigned long long,
720 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
724 * If the current map does not span the entire page we are about to try
725 * to write, then give up. The only way we can write a page that spans
726 * multiple mappings in a single writeback iteration is via the
727 * xfs_vm_writepage() function. Data integrity writeback requires the
728 * entire page to be written in a single attempt, otherwise the part of
729 * the page we don't write here doesn't get written as part of the data
732 * For normal writeback, we also don't attempt to write partial pages
733 * here as it simply means that write_cache_pages() will see it under
734 * writeback and ignore the page until some point in the future, at
735 * which time this will be the only page in the file that needs
736 * writeback. Hence for more optimal IO patterns, we should always
737 * avoid partial page writeback due to multiple mappings on a page here.
739 if (!xfs_imap_valid(inode, imap, end_offset))
740 goto fail_unlock_page;
742 len = 1 << inode->i_blkbits;
743 p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
745 p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
746 page_dirty = p_offset / len;
749 * The moment we find a buffer that doesn't match our current type
750 * specification or can't be written, abort the loop and start
751 * writeback. As per the above xfs_imap_valid() check, only
752 * xfs_vm_writepage() can handle partial page writeback fully - we are
753 * limited here to the buffers that are contiguous with the current
754 * ioend, and hence a buffer we can't write breaks that contiguity and
755 * we have to defer the rest of the IO to xfs_vm_writepage().
757 bh = head = page_buffers(page);
759 if (offset >= end_offset)
761 if (!buffer_uptodate(bh))
763 if (!(PageUptodate(page) || buffer_uptodate(bh))) {
768 if (buffer_unwritten(bh) || buffer_delay(bh) ||
770 if (buffer_unwritten(bh))
771 type = XFS_IO_UNWRITTEN;
772 else if (buffer_delay(bh))
773 type = XFS_IO_DELALLOC;
775 type = XFS_IO_OVERWRITE;
778 * imap should always be valid because of the above
779 * partial page end_offset check on the imap.
781 ASSERT(xfs_imap_valid(inode, imap, offset));
784 if (type != XFS_IO_OVERWRITE)
785 xfs_map_at_offset(inode, bh, imap, offset);
786 xfs_add_to_ioend(inode, bh, offset, type,
795 } while (offset += len, (bh = bh->b_this_page) != head);
797 if (uptodate && bh == head)
798 SetPageUptodate(page);
801 if (--wbc->nr_to_write <= 0 &&
802 wbc->sync_mode == WB_SYNC_NONE)
805 xfs_start_page_writeback(page, !page_dirty, count);
815 * Convert & write out a cluster of pages in the same extent as defined
816 * by mp and following the start page.
822 struct xfs_bmbt_irec *imap,
823 xfs_ioend_t **ioendp,
824 struct writeback_control *wbc,
830 pagevec_init(&pvec, 0);
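	/*
	 * Look the pages up in PAGEVEC_SIZE batches and stop as soon as
	 * xfs_convert_page() reports that a page could not be handled or we
	 * run past the last page covered by the extent.
	 */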
831 while (!done && tindex <= tlast) {
832 unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
834 if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
837 for (i = 0; i < pagevec_count(&pvec); i++) {
838 done = xfs_convert_page(inode, pvec.pages[i], tindex++,
844 pagevec_release(&pvec);
850 xfs_vm_invalidatepage(
855 trace_xfs_invalidatepage(page->mapping->host, page, offset,
857 block_invalidatepage(page, offset, length);
861 * If the page has delalloc buffers on it, we need to punch them out before we
862 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
863 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
864 * is done on that same region - the delalloc extent is returned when none is
865 * supposed to be there.
867 * We prevent this by truncating away the delalloc regions on the page before
868 * invalidating it. Because they are delalloc, we can do this without needing a
869 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
870 * truncation without a transaction as there is no space left for block
871 * reservation (typically why we see an ENOSPC in writeback).
873 * This is not a performance critical path, so for now just do the punching a
874 * buffer head at a time.
877 xfs_aops_discard_page(
880 struct inode *inode = page->mapping->host;
881 struct xfs_inode *ip = XFS_I(inode);
882 struct buffer_head *bh, *head;
883 loff_t offset = page_offset(page);
885 if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
888 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
891 xfs_alert(ip->i_mount,
892 "page discard on page %p, inode 0x%llx, offset %llu.",
893 page, ip->i_ino, offset);
895 xfs_ilock(ip, XFS_ILOCK_EXCL);
896 bh = head = page_buffers(page);
899 xfs_fileoff_t start_fsb;
901 if (!buffer_delay(bh))
904 start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
905 error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
907 /* something screwed, just bail */
908 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
909 xfs_alert(ip->i_mount,
910 "page discard unable to remove delalloc mapping.");
915 offset += 1 << inode->i_blkbits;
917 } while ((bh = bh->b_this_page) != head);
919 xfs_iunlock(ip, XFS_ILOCK_EXCL);
921 xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
926 * Write out a dirty page.
928 * For delalloc space on the page we need to allocate space and flush it.
929 * For unwritten space on the page we need to start the conversion to
930 * regular allocated space.
931 * For any other dirty buffer heads on the page we should flush them.
936 struct writeback_control *wbc)
938 struct inode *inode = page->mapping->host;
939 struct buffer_head *bh, *head;
940 struct xfs_bmbt_irec imap;
941 xfs_ioend_t *ioend = NULL, *iohead = NULL;
944 __uint64_t end_offset;
945 pgoff_t end_index, last_index;
947 int err, imap_valid = 0, uptodate = 1;
951 trace_xfs_writepage(inode, page, 0, 0);
953 ASSERT(page_has_buffers(page));
956 * Refuse to write the page out if we are called from reclaim context.
958 * This avoids stack overflows when called from deeply used stacks in
959 * random callers for direct reclaim or memcg reclaim. We explicitly
960 * allow reclaim from kswapd as the stack usage there is relatively low.
962 * This should never happen except in the case of a VM regression so
965 if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
970 * Given that we do not allow direct reclaim to call us, we should
971 * never be called while in a filesystem transaction.
973 if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
976 /* Is this page beyond the end of the file? */
977 offset = i_size_read(inode);
978 end_index = offset >> PAGE_CACHE_SHIFT;
979 last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
982 * The page index is less than the end_index, adjust the end_offset
983 * to the highest offset that this page should represent.
984 * -----------------------------------------------------
985 * | file mapping | <EOF> |
986 * -----------------------------------------------------
987 * | Page ... | Page N-2 | Page N-1 | Page N | |
988 * ^--------------------------------^----------|--------
989 * | desired writeback range | see else |
990 * ---------------------------------^------------------|
992 if (page->index < end_index)
993 end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT;
996 * Check whether the page to write out is beyond or straddles
998 * -------------------------------------------------------
999 * | file mapping | <EOF> |
1000 * -------------------------------------------------------
1001 * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
1002 * ^--------------------------------^-----------|---------
1004 * ---------------------------------^-----------|--------|
1006 unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);
1009 * Skip the page if it is fully outside i_size, e.g. due to a
1010 * truncate operation that is in progress. We must redirty the
1011 * page so that reclaim stops reclaiming it. Otherwise
1012 * xfs_vm_releasepage() is called on it and gets confused.
1014 * Note that the end_index is unsigned long; it would overflow
1015 * if the given offset is greater than 16TB on 32-bit system
1016 * and if we do check the page is fully outside i_size or not
1017 * via "if (page->index >= end_index + 1)" as "end_index + 1"
1018 * will be evaluated to 0. Hence this page will be redirtied
1019 * and be written out repeatedly which would result in an
1020 * infinite loop, and the user program that performs this operation
1021 * will hang. Instead, we can verify this situation by checking
1022 * if the page to write is totally beyond the i_size or if its
1023 * offset is just equal to the EOF.
1025 if (page->index > end_index ||
1026 (page->index == end_index && offset_into_page == 0))
1030 * The page straddles i_size. It must be zeroed out on each
1031 * and every writepage invocation because it may be mmapped.
1032 * "A file is mapped in multiples of the page size. For a file
1033 * that is not a multiple of the page size, the remaining
1034 * memory is zeroed when mapped, and writes to that region are
1035 * not written out to the file."
1037 zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
1039 /* Adjust the end_offset to the end of file */
1040 end_offset = offset;
1043 len = 1 << inode->i_blkbits;
1045 bh = head = page_buffers(page);
1046 offset = page_offset(page);
1047 type = XFS_IO_OVERWRITE;
1049 if (wbc->sync_mode == WB_SYNC_NONE)
1055 if (offset >= end_offset)
1057 if (!buffer_uptodate(bh))
1061 * set_page_dirty dirties all buffers in a page, independent
1062 * of their state. The dirty state however is entirely
1063 * meaningless for holes (!mapped && uptodate), so skip
1064 * buffers covering holes here.
1066 if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
1071 if (buffer_unwritten(bh)) {
1072 if (type != XFS_IO_UNWRITTEN) {
1073 type = XFS_IO_UNWRITTEN;
1076 } else if (buffer_delay(bh)) {
1077 if (type != XFS_IO_DELALLOC) {
1078 type = XFS_IO_DELALLOC;
1081 } else if (buffer_uptodate(bh)) {
1082 if (type != XFS_IO_OVERWRITE) {
1083 type = XFS_IO_OVERWRITE;
1087 if (PageUptodate(page))
1088 ASSERT(buffer_mapped(bh));
1090 * This buffer is not uptodate and will not be
1091 * written to disk. Ensure that we will put any
1092 * subsequent writeable buffers into a new
1100 imap_valid = xfs_imap_valid(inode, &imap, offset);
1103 * If we didn't have a valid mapping then we need to
1104 * put the new mapping into a separate ioend structure.
1105 * This ensures non-contiguous extents always have
1106 * separate ioends, which is particularly important
1107 * for unwritten extent conversion at I/O completion
1111 err = xfs_map_blocks(inode, offset, &imap, type,
1115 imap_valid = xfs_imap_valid(inode, &imap, offset);
1119 if (type != XFS_IO_OVERWRITE)
1120 xfs_map_at_offset(inode, bh, &imap, offset);
1121 xfs_add_to_ioend(inode, bh, offset, type, &ioend,
1129 } while (offset += len, ((bh = bh->b_this_page) != head));
1131 if (uptodate && bh == head)
1132 SetPageUptodate(page);
1134 xfs_start_page_writeback(page, 1, count);
1136 /* if there is no IO to be submitted for this page, we are done */
1143 * Any errors from this point onwards need to be reported through the IO
1144 * completion path as we have marked the initial page as under writeback
1148 xfs_off_t end_index;
1150 end_index = imap.br_startoff + imap.br_blockcount;
1153 end_index <<= inode->i_blkbits;
1156 end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;
1158 /* check against file size */
1159 if (end_index > last_index)
1160 end_index = last_index;
1162 xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
1168 * Reserve log space if we might write beyond the on-disk inode size.
1171 if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
1172 err = xfs_setfilesize_trans_alloc(ioend);
1174 xfs_submit_ioend(wbc, iohead, err);
1180 xfs_cancel_ioend(iohead);
1185 xfs_aops_discard_page(page);
1186 ClearPageUptodate(page);
1191 redirty_page_for_writepage(wbc, page);
1198 struct address_space *mapping,
1199 struct writeback_control *wbc)
1201 xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1202 return generic_writepages(mapping, wbc);
1206 * Called to move a page into cleanable state - and from there
1207 * to be released. The page should already be clean. We always
1208 * have buffer heads in this call.
1210 * Returns 1 if the page is ok to release, 0 otherwise.
1217 int delalloc, unwritten;
1219 trace_xfs_releasepage(page->mapping->host, page, 0, 0);
1221 xfs_count_page_state(page, &delalloc, &unwritten);
1223 if (WARN_ON_ONCE(delalloc))
1225 if (WARN_ON_ONCE(unwritten))
1228 return try_to_free_buffers(page);
1232 * When we map a DIO buffer, we may need to attach an ioend that describes the
1233 * type of write IO we are doing. This passes to the completion function the
1234 * operations it needs to perform. If the mapping is for an overwrite wholly
1235 * within the EOF then we don't need an ioend and so we don't allocate one.
1236 * This avoids the unnecessary overhead of allocating and freeing ioends for
1237 * workloads that don't require transactions on IO completion.
1239 * If we get multiple mappings in a single IO, we might be mapping different
1240 * types. But because the direct IO can only have a single private pointer, we
1241 * need to ensure that:
1243 * a) i) the ioend spans the entire region of unwritten mappings; or
1244 * ii) the ioend spans all the mappings that cross or are beyond EOF; and
1245 * b) if it contains unwritten extents, it is *permanently* marked as such
1247 * We could do this by chaining ioends like buffered IO does, but we only
1248 * actually get one IO completion callback from the direct IO, and that spans
1249 * the entire IO regardless of how many mappings and IOs are needed to complete
1250 * the DIO. There is only going to be one reference to the ioend and its life
1251 * cycle is constrained by the DIO completion code. Hence we don't need
1252 * reference counting here.
1254 * Note that for DIO, an IO to the highest supported file block offset (i.e.
1255 * 2^63 - 1FSB bytes) will result in the offset + count overflowing a signed 64
1256 * bit variable. Hence if we see this overflow, we have to assume that the IO is
1257 * extending the file size. We won't know for sure until IO completion is run
1258 * and the actual max write offset is communicated to the IO completion
1261 * For DAX page faults, we are preparing to never see unwritten extents here,
1262 * nor should we ever extend the inode size. Hence we will soon have nothing to
1263 * do here for this case, ensuring we don't have to provide an IO completion
1264 * callback to free an ioend that we don't actually need for a fault into the
1265 * page at offset (2^63 - 1FSB) bytes.
1270 struct inode *inode,
1271 struct buffer_head *bh_result,
1272 struct xfs_bmbt_irec *imap,
1276 struct xfs_ioend *ioend;
1277 xfs_off_t size = bh_result->b_size;
1280 if (ISUNWRITTEN(imap))
1281 type = XFS_IO_UNWRITTEN;
1283 type = XFS_IO_OVERWRITE;
1285 trace_xfs_gbmap_direct(XFS_I(inode), offset, size, type, imap);
1288 ASSERT(type == XFS_IO_OVERWRITE);
1289 trace_xfs_gbmap_direct_none(XFS_I(inode), offset, size, type,
1294 if (bh_result->b_private) {
1295 ioend = bh_result->b_private;
1296 ASSERT(ioend->io_size > 0);
1297 ASSERT(offset >= ioend->io_offset);
1298 if (offset + size > ioend->io_offset + ioend->io_size)
1299 ioend->io_size = offset - ioend->io_offset + size;
1301 if (type == XFS_IO_UNWRITTEN && type != ioend->io_type)
1302 ioend->io_type = XFS_IO_UNWRITTEN;
1304 trace_xfs_gbmap_direct_update(XFS_I(inode), ioend->io_offset,
1305 ioend->io_size, ioend->io_type,
1307 } else if (type == XFS_IO_UNWRITTEN ||
1308 offset + size > i_size_read(inode) ||
1309 offset + size < 0) {
1310 ioend = xfs_alloc_ioend(inode, type);
1311 ioend->io_offset = offset;
1312 ioend->io_size = size;
1314 bh_result->b_private = ioend;
1315 set_buffer_defer_completion(bh_result);
1317 trace_xfs_gbmap_direct_new(XFS_I(inode), offset, size, type,
1320 trace_xfs_gbmap_direct_none(XFS_I(inode), offset, size, type,
1326 * If this is O_DIRECT or the mpage code calling, tell them how large the mapping
1327 * is so that we can avoid repeated get_blocks calls.
1329 * If the mapping spans EOF, then we have to break the mapping up as the mapping
1330 * for blocks beyond EOF must be marked new so that sub block regions can be
1331 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
1332 * was just allocated or is unwritten, otherwise the callers would overwrite
1333 * existing data with zeros. Hence we have to split the mapping into a range up
1334 * to and including EOF, and a second mapping for beyond EOF.
1338 struct inode *inode,
1340 struct buffer_head *bh_result,
1341 struct xfs_bmbt_irec *imap,
1345 xfs_off_t mapping_size;
1347 mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
1348 mapping_size <<= inode->i_blkbits;
1350 ASSERT(mapping_size > 0);
1351 if (mapping_size > size)
1352 mapping_size = size;
1353 if (offset < i_size_read(inode) &&
1354 offset + mapping_size >= i_size_read(inode)) {
1355 /* limit mapping to block that spans EOF */
1356 mapping_size = roundup_64(i_size_read(inode) - offset,
1357 1 << inode->i_blkbits);
1359 if (mapping_size > LONG_MAX)
1360 mapping_size = LONG_MAX;
1362 bh_result->b_size = mapping_size;
1367 struct inode *inode,
1369 struct buffer_head *bh_result,
1374 struct xfs_inode *ip = XFS_I(inode);
1375 struct xfs_mount *mp = ip->i_mount;
1376 xfs_fileoff_t offset_fsb, end_fsb;
1379 struct xfs_bmbt_irec imap;
1385 if (XFS_FORCED_SHUTDOWN(mp))
1388 offset = (xfs_off_t)iblock << inode->i_blkbits;
1389 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1390 size = bh_result->b_size;
1392 if (!create && direct && offset >= i_size_read(inode))
1396 * Direct I/O is usually done on preallocated files, so try getting
1397 * a block mapping without an exclusive lock first. For buffered
1398 * writes we already have the exclusive iolock anyway, so avoiding
1399 * a lock roundtrip here by taking the ilock exclusive from the
1400 * beginning is a useful micro optimization.
1402 if (create && !direct) {
1403 lockmode = XFS_ILOCK_EXCL;
1404 xfs_ilock(ip, lockmode);
1406 lockmode = xfs_ilock_data_map_shared(ip);
1409 ASSERT(offset <= mp->m_super->s_maxbytes);
1410 if (offset + size > mp->m_super->s_maxbytes)
1411 size = mp->m_super->s_maxbytes - offset;
1412 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1413 offset_fsb = XFS_B_TO_FSBT(mp, offset);
1415 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
1416 &imap, &nimaps, XFS_BMAPI_ENTIRE);
1420 /* for DAX, we convert unwritten extents directly */
1423 (imap.br_startblock == HOLESTARTBLOCK ||
1424 imap.br_startblock == DELAYSTARTBLOCK) ||
1425 (IS_DAX(inode) && ISUNWRITTEN(&imap)))) {
1426 if (direct || xfs_get_extsz_hint(ip)) {
1428 * Drop the ilock in preparation for starting the block
1429 * allocation transaction. It will be retaken
1430 * exclusively inside xfs_iomap_write_direct for the
1431 * actual allocation.
1433 xfs_iunlock(ip, lockmode);
1434 error = xfs_iomap_write_direct(ip, offset, size,
1442 * Delalloc reservations do not require a transaction,
1443 * we can go on without dropping the lock here. If we
1444 * are allocating a new delalloc block, make sure that
1445 * we set the new flag so that we mark the buffer new so
1446 * that we know that it is newly allocated if the write
1449 if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
1451 error = xfs_iomap_write_delay(ip, offset, size, &imap);
1455 xfs_iunlock(ip, lockmode);
1457 trace_xfs_get_blocks_alloc(ip, offset, size,
1458 ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
1459 : XFS_IO_DELALLOC, &imap);
1460 } else if (nimaps) {
1461 trace_xfs_get_blocks_found(ip, offset, size,
1462 ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
1463 : XFS_IO_OVERWRITE, &imap);
1464 xfs_iunlock(ip, lockmode);
1466 trace_xfs_get_blocks_notfound(ip, offset, size);
1470 if (IS_DAX(inode) && create) {
1471 ASSERT(!ISUNWRITTEN(&imap));
1472 /* zeroing is not needed at a higher layer */
1476 /* trim mapping down to size requested */
1477 if (direct || size > (1 << inode->i_blkbits))
1478 xfs_map_trim_size(inode, iblock, bh_result,
1479 &imap, offset, size);
1482 * For unwritten extents do not report a disk address in the buffered
1483 * read case (treat as if we're reading into a hole).
1485 if (imap.br_startblock != HOLESTARTBLOCK &&
1486 imap.br_startblock != DELAYSTARTBLOCK &&
1487 (create || !ISUNWRITTEN(&imap))) {
1488 xfs_map_buffer(inode, bh_result, &imap, offset);
1489 if (ISUNWRITTEN(&imap))
1490 set_buffer_unwritten(bh_result);
1491 /* direct IO needs special help */
1492 if (create && direct)
1493 xfs_map_direct(inode, bh_result, &imap, offset,
1498 * If this is a realtime file, data may be on a different device
1499 * to that pointed to by the buffer_head b_bdev currently.
1501 bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
1504 * If we previously allocated a block out beyond eof and we are now
1505 * coming back to use it then we will need to flag it as new even if it
1506 * has a disk address.
1508 * With sub-block writes into unwritten extents we also need to mark
1509 * the buffer as new so that the unwritten parts of the buffer gets
1513 ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
1514 (offset >= i_size_read(inode)) ||
1515 (new || ISUNWRITTEN(&imap))))
1516 set_buffer_new(bh_result);
1518 if (imap.br_startblock == DELAYSTARTBLOCK) {
1521 set_buffer_uptodate(bh_result);
1522 set_buffer_mapped(bh_result);
1523 set_buffer_delay(bh_result);
1530 xfs_iunlock(ip, lockmode);
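/*
 * Wrappers around __xfs_get_blocks() for the different callers: buffered IO,
 * direct IO and DAX page faults. They differ only in the direct and dax_fault
 * flags that are passed through.
 */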
1536 struct inode *inode,
1538 struct buffer_head *bh_result,
1541 return __xfs_get_blocks(inode, iblock, bh_result, create, false, false);
1545 xfs_get_blocks_direct(
1546 struct inode *inode,
1548 struct buffer_head *bh_result,
1551 return __xfs_get_blocks(inode, iblock, bh_result, create, true, false);
1555 xfs_get_blocks_dax_fault(
1556 struct inode *inode,
1558 struct buffer_head *bh_result,
1561 return __xfs_get_blocks(inode, iblock, bh_result, create, true, true);
1565 __xfs_end_io_direct_write(
1566 struct inode *inode,
1567 struct xfs_ioend *ioend,
1571 struct xfs_mount *mp = XFS_I(inode)->i_mount;
1573 if (XFS_FORCED_SHUTDOWN(mp) || ioend->io_error)
1577 * DIO completion end_io functions are only called on writes if more
1578 * than 0 bytes were written.
1583 * The ioend only maps whole blocks, while the IO may be sector aligned.
1584 * Hence the ioend offset/size may not match the IO offset/size exactly.
1585 * Because we don't map overwrites within EOF into the ioend, the offset
1586 * may not match, but only if the endio spans EOF. Either way, write
1587 * the IO sizes into the ioend so that completion processing does the
1590 ASSERT(offset + size <= ioend->io_offset + ioend->io_size);
1591 ioend->io_size = size;
1592 ioend->io_offset = offset;
1595 * The ioend tells us whether we are doing unwritten extent conversion
1596 * or an append transaction that updates the on-disk file size. These
1597 * cases are the only cases where we should *potentially* be needing
1598 * to update the VFS inode size.
1600 * We need to update the in-core inode size here so that we don't end up
1601 * with the on-disk inode size being outside the in-core inode size. We
1602 * have no other method of updating EOF for AIO, so always do it here
1605 * We need to lock the test/set EOF update as we can be racing with
1606 * other IO completions here to update the EOF. Failing to serialise
1607 * here can result in EOF moving backwards and Bad Things Happen when
1610 spin_lock(&XFS_I(inode)->i_flags_lock);
1611 if (offset + size > i_size_read(inode))
1612 i_size_write(inode, offset + size);
1613 spin_unlock(&XFS_I(inode)->i_flags_lock);
1616 * If we are doing an append IO that needs to update the EOF on disk,
1617 * do the transaction reserve now so we can use common end io
1618 * processing. Stashing the error (if there is one) in the ioend will
1619 * result in the ioend processing passing on the error if it is
1620 * possible as we can't return it from here.
1622 if (ioend->io_type == XFS_IO_OVERWRITE)
1623 ioend->io_error = xfs_setfilesize_trans_alloc(ioend);
1626 xfs_end_io(&ioend->io_work);
1631 * Complete a direct I/O write request.
1633 * The ioend structure is passed from __xfs_get_blocks() to tell us what to do.
1634 * If no ioend exists (i.e. @private == NULL) then the write IO is an overwrite
1635 * wholly within the EOF and so there is nothing for us to do. Note that in this
1636 * case the completion can be called in interrupt context, whereas if we have an
1637 * ioend we will always be called in task context (i.e. from a workqueue).
1640 xfs_end_io_direct_write(
1646 struct inode *inode = file_inode(iocb->ki_filp);
1647 struct xfs_ioend *ioend = private;
1649 trace_xfs_gbmap_direct_endio(XFS_I(inode), offset, size,
1650 ioend ? ioend->io_type : 0, NULL);
1653 ASSERT(offset + size <= i_size_read(inode));
1657 __xfs_end_io_direct_write(inode, ioend, offset, size);
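/*
 * xfs_vm_do_dio(): dispatch the direct IO. DAX files go through dax_do_io(),
 * everything else goes through __blockdev_direct_IO() against the block
 * device backing the inode.
 */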
1660 static inline ssize_t
1662 struct inode *inode,
1664 struct iov_iter *iter,
1666 void (*endio)(struct kiocb *iocb,
1672 struct block_device *bdev;
1675 return dax_do_io(iocb, inode, iter, offset,
1676 xfs_get_blocks_direct, endio, 0);
1678 bdev = xfs_find_bdev_for_inode(inode);
1679 return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
1680 xfs_get_blocks_direct, endio, NULL, flags);
1686 struct iov_iter *iter,
1689 struct inode *inode = iocb->ki_filp->f_mapping->host;
1691 if (iov_iter_rw(iter) == WRITE)
1692 return xfs_vm_do_dio(inode, iocb, iter, offset,
1693 xfs_end_io_direct_write, DIO_ASYNC_EXTEND);
1694 return xfs_vm_do_dio(inode, iocb, iter, offset, NULL, 0);
1698 * Punch out the delalloc blocks we have already allocated.
1700 * Don't bother with xfs_setattr given that nothing can have made it to disk yet
1701 * as the page is still locked at this point.
1704 xfs_vm_kill_delalloc_range(
1705 struct inode *inode,
1709 struct xfs_inode *ip = XFS_I(inode);
1710 xfs_fileoff_t start_fsb;
1711 xfs_fileoff_t end_fsb;
1714 start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
1715 end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
1716 if (end_fsb <= start_fsb)
1719 xfs_ilock(ip, XFS_ILOCK_EXCL);
1720 error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
1721 end_fsb - start_fsb);
1723 /* something screwed, just bail */
1724 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1725 xfs_alert(ip->i_mount,
1726 "xfs_vm_write_failed: unable to clean up ino %lld",
1730 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1734 xfs_vm_write_failed(
1735 struct inode *inode,
1740 loff_t block_offset;
1743 loff_t from = pos & (PAGE_CACHE_SIZE - 1);
1744 loff_t to = from + len;
1745 struct buffer_head *bh, *head;
1748 * The request pos offset might be 32 or 64 bit, this is all fine
1749 * on 64-bit platform. However, for 64-bit pos request on 32-bit
1750 * platform, the high 32-bit will be masked off if we evaluate the
1751 * block_offset via (pos & PAGE_MASK) because the PAGE_MASK is
1752 * 0xfffff000 as an unsigned long, hence the result is incorrect
1753 * which could cause the following ASSERT to fail in most cases.
1754 * In order to avoid this, we can evaluate the block_offset of the
1755 * start of the page by using shifts rather than masks, which avoids the mismatch.
1758 block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;
1760 ASSERT(block_offset + from == pos);
1762 head = page_buffers(page);
1764 for (bh = head; bh != head || !block_start;
1765 bh = bh->b_this_page, block_start = block_end,
1766 block_offset += bh->b_size) {
1767 block_end = block_start + bh->b_size;
1769 /* skip buffers before the write */
1770 if (block_end <= from)
1773 /* if the buffer is after the write, we're done */
1774 if (block_start >= to)
1777 if (!buffer_delay(bh))
1780 if (!buffer_new(bh) && block_offset < i_size_read(inode))
1783 xfs_vm_kill_delalloc_range(inode, block_offset,
1784 block_offset + bh->b_size);
1787 * This buffer does not contain data anymore. Make sure anyone
1788 * who finds it knows that for certain.
1790 clear_buffer_delay(bh);
1791 clear_buffer_uptodate(bh);
1792 clear_buffer_mapped(bh);
1793 clear_buffer_new(bh);
1794 clear_buffer_dirty(bh);
1800 * This used to call block_write_begin(), but it unlocks and releases the page
1801 * on error, and we need that page to be able to punch stale delalloc blocks out
1802 * on failure. Hence we copy-n-waste it here and call xfs_vm_write_failed() at
1803 * the appropriate point.
1808 struct address_space *mapping,
1812 struct page **pagep,
1815 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1819 ASSERT(len <= PAGE_CACHE_SIZE);
1821 page = grab_cache_page_write_begin(mapping, index, flags);
1825 status = __block_write_begin(page, pos, len, xfs_get_blocks);
1826 if (unlikely(status)) {
1827 struct inode *inode = mapping->host;
1828 size_t isize = i_size_read(inode);
1830 xfs_vm_write_failed(inode, page, pos, len);
1834 * If the write is beyond EOF, we only want to kill blocks
1835 * allocated in this write, not blocks that were previously
1836 * written successfully.
1838 if (pos + len > isize) {
1839 ssize_t start = max_t(ssize_t, pos, isize);
1841 truncate_pagecache_range(inode, start, pos + len);
1844 page_cache_release(page);
1853 * On failure, we only need to kill delalloc blocks beyond EOF in the range of
1854 * this specific write because they will never be written. Previous writes
1855 * beyond EOF where block allocation succeeded do not need to be trashed, so
1856 * only new blocks from this write should be trashed. For blocks within
1857 * EOF, generic_write_end() zeros them so they are safe to leave alone and be
1858 * written with all the other valid data.
1863 struct address_space *mapping,
1872 ASSERT(len <= PAGE_CACHE_SIZE);
1874 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
1875 if (unlikely(ret < len)) {
1876 struct inode *inode = mapping->host;
1877 size_t isize = i_size_read(inode);
1878 loff_t to = pos + len;
1881 /* only kill blocks in this write beyond EOF */
1884 xfs_vm_kill_delalloc_range(inode, isize, to);
1885 truncate_pagecache_range(inode, isize, to);
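/*
 * xfs_vm_bmap(): flush and wait for dirty data under the shared iolock so
 * that delayed allocations are converted to real extents before the block
 * mapping is reported.
 */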
1893 struct address_space *mapping,
1896 struct inode *inode = (struct inode *)mapping->host;
1897 struct xfs_inode *ip = XFS_I(inode);
1899 trace_xfs_vm_bmap(XFS_I(inode));
1900 xfs_ilock(ip, XFS_IOLOCK_SHARED);
1901 filemap_write_and_wait(mapping);
1902 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
1903 return generic_block_bmap(mapping, block, xfs_get_blocks);
1908 struct file *unused,
1911 return mpage_readpage(page, xfs_get_blocks);
1916 struct file *unused,
1917 struct address_space *mapping,
1918 struct list_head *pages,
1921 return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
1925 * This is basically a copy of __set_page_dirty_buffers() with one
1926 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
1927 * dirty, we'll never be able to clean them because we don't write buffers
1928 * beyond EOF, and that means we can't invalidate pages that span EOF
1929 * that have been marked dirty. Further, the dirty state can leak into
1930 * the file interior if the file is extended, resulting in all sorts of
1931 * bad things happening as the state does not match the underlying data.
1933 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
1934 * this only exist because of bufferheads and how the generic code manages them.
1937 xfs_vm_set_page_dirty(
1940 struct address_space *mapping = page->mapping;
1941 struct inode *inode = mapping->host;
1945 struct mem_cgroup *memcg;
1947 if (unlikely(!mapping))
1948 return !TestSetPageDirty(page);
1950 end_offset = i_size_read(inode);
1951 offset = page_offset(page);
1953 spin_lock(&mapping->private_lock);
1954 if (page_has_buffers(page)) {
1955 struct buffer_head *head = page_buffers(page);
1956 struct buffer_head *bh = head;
1959 if (offset < end_offset)
1960 set_buffer_dirty(bh);
1961 bh = bh->b_this_page;
1962 offset += 1 << inode->i_blkbits;
1963 } while (bh != head);
1966 * Use mem_cgroup_begin_page_stat() to keep PageDirty synchronized with
1967 * per-memcg dirty page counters.
1969 memcg = mem_cgroup_begin_page_stat(page);
1970 newly_dirty = !TestSetPageDirty(page);
1971 spin_unlock(&mapping->private_lock);
1974 /* sigh - __set_page_dirty() is static, so copy it here, too */
1975 unsigned long flags;
1977 spin_lock_irqsave(&mapping->tree_lock, flags);
1978 if (page->mapping) { /* Race with truncate? */
1979 WARN_ON_ONCE(!PageUptodate(page));
1980 account_page_dirtied(page, mapping, memcg);
1981 radix_tree_tag_set(&mapping->page_tree,
1982 page_index(page), PAGECACHE_TAG_DIRTY);
1984 spin_unlock_irqrestore(&mapping->tree_lock, flags);
1986 mem_cgroup_end_page_stat(memcg);
1988 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1992 const struct address_space_operations xfs_address_space_operations = {
1993 .readpage = xfs_vm_readpage,
1994 .readpages = xfs_vm_readpages,
1995 .writepage = xfs_vm_writepage,
1996 .writepages = xfs_vm_writepages,
1997 .set_page_dirty = xfs_vm_set_page_dirty,
1998 .releasepage = xfs_vm_releasepage,
1999 .invalidatepage = xfs_vm_invalidatepage,
2000 .write_begin = xfs_vm_write_begin,
2001 .write_end = xfs_vm_write_end,
2002 .bmap = xfs_vm_bmap,
2003 .direct_IO = xfs_vm_direct_IO,
2004 .migratepage = buffer_migrate_page,
2005 .is_partially_uptodate = block_is_partially_uptodate,
2006 .error_remove_page = generic_error_remove_page,