2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 #include "xfs_shared.h"
20 #include "xfs_format.h"
21 #include "xfs_log_format.h"
22 #include "xfs_trans_resv.h"
23 #include "xfs_mount.h"
24 #include "xfs_inode.h"
25 #include "xfs_trans.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_alloc.h"
28 #include "xfs_error.h"
29 #include "xfs_iomap.h"
30 #include "xfs_trace.h"
32 #include "xfs_bmap_util.h"
33 #include "xfs_bmap_btree.h"
34 #include <linux/gfp.h>
35 #include <linux/mpage.h>
36 #include <linux/pagevec.h>
37 #include <linux/writeback.h>
39 /* flags for direct write completions */
40 #define XFS_DIO_FLAG_UNWRITTEN (1 << 0)
41 #define XFS_DIO_FLAG_APPEND (1 << 1)
44 * structure owned by writepages, passed to individual writepage calls
46 struct xfs_writepage_ctx {
47 struct xfs_bmbt_irec imap;
50 struct xfs_ioend *ioend;
60 struct buffer_head *bh, *head;
62 *delalloc = *unwritten = 0;
64 bh = head = page_buffers(page);
66 if (buffer_unwritten(bh))
68 else if (buffer_delay(bh))
70 } while ((bh = bh->b_this_page) != head);
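/*
 * Return the block device backing this inode's data: the realtime device
 * for realtime inodes, the data device otherwise.
 */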
74 xfs_find_bdev_for_inode(
77 struct xfs_inode *ip = XFS_I(inode);
78 struct xfs_mount *mp = ip->i_mount;
80 if (XFS_IS_REALTIME_INODE(ip))
81 return mp->m_rtdev_targp->bt_bdev;
83 return mp->m_ddev_targp->bt_bdev;
87 * We're now finished for good with this page. Update the page state via the
88 * associated buffer_heads, paying attention to the start and end offsets that
89 * we need to process on the page.
92 xfs_finish_page_writeback(
97 unsigned int end = bvec->bv_offset + bvec->bv_len - 1;
98 struct buffer_head *head, *bh;
101 ASSERT(bvec->bv_offset < PAGE_SIZE);
102 ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
103 ASSERT(end < PAGE_SIZE);
104 ASSERT((bvec->bv_len & ((1 << inode->i_blkbits) - 1)) == 0);
106 bh = head = page_buffers(bvec->bv_page);
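/*
 * Walk all buffers on the page, calling the buffer end_io handler only for
 * those that fall inside the range covered by this bio_vec.
 */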
109 if (off < bvec->bv_offset)
113 bh->b_end_io(bh, !error);
116 } while ((bh = bh->b_this_page) != head);
120 * We're now finished for good with this ioend structure. Update the page
121 * state, release holds on bios, and finally free up memory. Do not use the
126 struct xfs_ioend *ioend,
129 struct inode *inode = ioend->io_inode;
130 struct bio *last = ioend->io_bio;
131 struct bio *bio, *next;
133 for (bio = &ioend->io_inline_bio; bio; bio = next) {
134 struct bio_vec *bvec;
138 * For the last bio, bi_private points to the ioend, so we
139 * need to explicitly end the iteration here.
144 next = bio->bi_private;
146 /* walk each page on bio, ending page IO on them */
147 bio_for_each_segment_all(bvec, bio, i)
148 xfs_finish_page_writeback(inode, bvec, error);
155 * Fast and loose check if this write could update the on-disk inode size.
157 static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
159 return ioend->io_offset + ioend->io_size >
160 XFS_I(ioend->io_inode)->i_d.di_size;
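/*
 * Allocate a transaction up front for updating the on-disk inode size
 * once the ioend completes.
 */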
164 xfs_setfilesize_trans_alloc(
165 struct xfs_ioend *ioend)
167 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
168 struct xfs_trans *tp;
171 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
175 ioend->io_append_trans = tp;
178 * We may pass freeze protection with a transaction. So tell lockdep
181 __sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
183 * We hand off the transaction to the completion thread now, so
184 * clear the flag here.
186 current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
191 * Update on-disk file size now that data has been written to disk.
195 struct xfs_inode *ip,
196 struct xfs_trans *tp,
202 xfs_ilock(ip, XFS_ILOCK_EXCL);
203 isize = xfs_new_eof(ip, offset + size);
205 xfs_iunlock(ip, XFS_ILOCK_EXCL);
206 xfs_trans_cancel(tp);
210 trace_xfs_setfilesize(ip, offset, size);
212 ip->i_d.di_size = isize;
213 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
214 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
216 return xfs_trans_commit(tp);
220 xfs_setfilesize_ioend(
221 struct xfs_ioend *ioend,
224 struct xfs_inode *ip = XFS_I(ioend->io_inode);
225 struct xfs_trans *tp = ioend->io_append_trans;
228 * The transaction may have been allocated in the I/O submission thread,
229 * thus we need to mark ourselves as being in a transaction manually.
230 * Similarly for freeze protection.
232 current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
233 __sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
235 /* we abort the update if there was an IO error */
237 xfs_trans_cancel(tp);
241 return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
245 * IO write completion.
249 struct work_struct *work)
251 struct xfs_ioend *ioend =
252 container_of(work, struct xfs_ioend, io_work);
253 struct xfs_inode *ip = XFS_I(ioend->io_inode);
254 int error = ioend->io_bio->bi_error;
257 * Set an error if the mount has shut down and proceed with end I/O
258 * processing so it can perform whatever cleanups are necessary.
260 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
264 * For unwritten extents we need to issue transactions to convert a
265 * range to normal written extents after the data I/O has finished.
266 * Detecting and handling completion IO errors is done individually
267 * for each case as different cleanup operations need to be performed
270 if (ioend->io_type == XFS_IO_UNWRITTEN) {
273 error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
275 } else if (ioend->io_append_trans) {
276 error = xfs_setfilesize_ioend(ioend, error);
278 ASSERT(!xfs_ioend_is_append(ioend));
282 xfs_destroy_ioend(ioend, error);
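/*
 * Bio completion handler: defer unwritten extent conversion and on-disk
 * size updates to a workqueue, and finish everything else directly.
 */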
289 struct xfs_ioend *ioend = bio->bi_private;
290 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
292 if (ioend->io_type == XFS_IO_UNWRITTEN)
293 queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
294 else if (ioend->io_append_trans)
295 queue_work(mp->m_data_workqueue, &ioend->io_work);
297 xfs_destroy_ioend(ioend, bio->bi_error);
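/*
 * Look up the extent mapping covering @offset for writeback, allocating
 * real blocks for delalloc ranges where necessary.
 */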
304 struct xfs_bmbt_irec *imap,
307 struct xfs_inode *ip = XFS_I(inode);
308 struct xfs_mount *mp = ip->i_mount;
309 ssize_t count = 1 << inode->i_blkbits;
310 xfs_fileoff_t offset_fsb, end_fsb;
312 int bmapi_flags = XFS_BMAPI_ENTIRE;
315 if (XFS_FORCED_SHUTDOWN(mp))
318 if (type == XFS_IO_UNWRITTEN)
319 bmapi_flags |= XFS_BMAPI_IGSTATE;
321 xfs_ilock(ip, XFS_ILOCK_SHARED);
322 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
323 (ip->i_df.if_flags & XFS_IFEXTENTS));
324 ASSERT(offset <= mp->m_super->s_maxbytes);
326 if (offset + count > mp->m_super->s_maxbytes)
327 count = mp->m_super->s_maxbytes - offset;
328 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
329 offset_fsb = XFS_B_TO_FSBT(mp, offset);
330 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
331 imap, &nimaps, bmapi_flags);
332 xfs_iunlock(ip, XFS_ILOCK_SHARED);
337 if (type == XFS_IO_DELALLOC &&
338 (!nimaps || isnullstartblock(imap->br_startblock))) {
339 error = xfs_iomap_write_allocate(ip, offset, imap);
341 trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
346 if (type == XFS_IO_UNWRITTEN) {
348 ASSERT(imap->br_startblock != HOLESTARTBLOCK);
349 ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
353 trace_xfs_map_blocks_found(ip, offset, count, type, imap);
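/*
 * Return true if @offset falls within the cached extent mapping.
 */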
360 struct xfs_bmbt_irec *imap,
363 offset >>= inode->i_blkbits;
365 return offset >= imap->br_startoff &&
366 offset < imap->br_startoff + imap->br_blockcount;
370 xfs_start_buffer_writeback(
371 struct buffer_head *bh)
373 ASSERT(buffer_mapped(bh));
374 ASSERT(buffer_locked(bh));
375 ASSERT(!buffer_delay(bh));
376 ASSERT(!buffer_unwritten(bh));
378 mark_buffer_async_write(bh);
379 set_buffer_uptodate(bh);
380 clear_buffer_dirty(bh);
384 xfs_start_page_writeback(
388 ASSERT(PageLocked(page));
389 ASSERT(!PageWriteback(page));
392 * if the page was not fully cleaned, we need to ensure that the higher
393 * layers come back to it correctly. That means we need to keep the page
394 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
395 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
396 * write this page in this writeback sweep will be made.
399 clear_page_dirty_for_io(page);
400 set_page_writeback(page);
402 set_page_writeback_keepwrite(page);
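/*
 * Add the buffer's data to the bio. Returns the number of bytes added,
 * which is less than bh->b_size when the bio is already full.
 */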
407 static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
409 return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
413 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
414 * it, and we submit that bio. The ioend may be used for multiple bio
415 * submissions, so we only want to allocate an append transaction for the ioend
416 * once. In the case of multiple bio submission, each bio will take an IO
417 * reference to the ioend to ensure that the ioend completion is only done once
418 * all bios have been submitted and the ioend is really done.
420 * If @fail is non-zero, it means that we have a situation where some part of
421 * the submission process has failed after we have marked pages for writeback
422 * and unlocked them. In this situation, we need to fail the bio and ioend
423 * rather than submit it to IO. This typically only happens on a filesystem
428 struct writeback_control *wbc,
429 struct xfs_ioend *ioend,
432 /* Reserve log space if we might write beyond the on-disk inode size. */
434 ioend->io_type != XFS_IO_UNWRITTEN &&
435 xfs_ioend_is_append(ioend) &&
436 !ioend->io_append_trans)
437 status = xfs_setfilesize_trans_alloc(ioend);
439 ioend->io_bio->bi_private = ioend;
440 ioend->io_bio->bi_end_io = xfs_end_bio;
441 bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
442 (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
444 * If we are failing the IO now, just mark the ioend with an
445 * error and finish it. This will run IO completion immediately
446 * as there is only one reference to the ioend at this point in
450 ioend->io_bio->bi_error = status;
451 bio_endio(ioend->io_bio);
455 submit_bio(ioend->io_bio);
460 xfs_init_bio_from_bh(
462 struct buffer_head *bh)
464 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
465 bio->bi_bdev = bh->b_bdev;
468 static struct xfs_ioend *
473 struct buffer_head *bh)
475 struct xfs_ioend *ioend;
478 bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
479 xfs_init_bio_from_bh(bio, bh);
481 ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
482 INIT_LIST_HEAD(&ioend->io_list);
483 ioend->io_type = type;
484 ioend->io_inode = inode;
486 ioend->io_offset = offset;
487 INIT_WORK(&ioend->io_work, xfs_end_io);
488 ioend->io_append_trans = NULL;
494 * Allocate a new bio, and chain the old bio to the new one.
496 * Note that we have to perform the chaining in this unintuitive order
497 * so that the bi_private linkage is set up in the right direction for the
498 * traversal in xfs_destroy_ioend().
502 struct xfs_ioend *ioend,
503 struct writeback_control *wbc,
504 struct buffer_head *bh)
508 new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
509 xfs_init_bio_from_bh(new, bh);
511 bio_chain(ioend->io_bio, new);
512 bio_get(ioend->io_bio); /* for xfs_destroy_ioend */
513 bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
514 (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
515 submit_bio(ioend->io_bio);
520 * Test to see if we've been building up a completion structure for
521 * earlier buffers -- if so, we try to append to this ioend if we
522 * can, otherwise we finish off any current ioend and start another.
523 * Return the ioend we finished off so that the caller can submit it
524 * once it has finished processing the dirty page.
529 struct buffer_head *bh,
531 struct xfs_writepage_ctx *wpc,
532 struct writeback_control *wbc,
533 struct list_head *iolist)
535 if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
536 bh->b_blocknr != wpc->last_block + 1 ||
537 offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
539 list_add(&wpc->ioend->io_list, iolist);
540 wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
544 * If the buffer doesn't fit into the bio we need to allocate a new
545 * one. This shouldn't happen more than once for a given buffer.
547 while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
548 xfs_chain_bio(wpc->ioend, wbc, bh);
550 wpc->ioend->io_size += bh->b_size;
551 wpc->last_block = bh->b_blocknr;
552 xfs_start_buffer_writeback(bh);
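/*
 * Map a buffer_head to the on-disk block address described by the extent
 * mapping that covers @offset.
 */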
558 struct buffer_head *bh,
559 struct xfs_bmbt_irec *imap,
563 struct xfs_mount *m = XFS_I(inode)->i_mount;
564 xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
565 xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);
567 ASSERT(imap->br_startblock != HOLESTARTBLOCK);
568 ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
570 bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
571 ((offset - iomap_offset) >> inode->i_blkbits);
573 ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));
576 set_buffer_mapped(bh);
582 struct buffer_head *bh,
583 struct xfs_bmbt_irec *imap,
586 ASSERT(imap->br_startblock != HOLESTARTBLOCK);
587 ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
589 xfs_map_buffer(inode, bh, imap, offset);
590 set_buffer_mapped(bh);
591 clear_buffer_delay(bh);
592 clear_buffer_unwritten(bh);
596 * Test if a given page contains at least one buffer of a given @type.
597 * If @check_all_buffers is true, then we walk all the buffers in the page to
598 * try to find one of the type passed in. If it is not set, then the caller only
599 * needs to check the first buffer on the page for a match.
605 bool check_all_buffers)
607 struct buffer_head *bh;
608 struct buffer_head *head;
610 if (PageWriteback(page))
614 if (!page_has_buffers(page))
617 bh = head = page_buffers(page);
619 if (buffer_unwritten(bh)) {
620 if (type == XFS_IO_UNWRITTEN)
622 } else if (buffer_delay(bh)) {
623 if (type == XFS_IO_DELALLOC)
625 } else if (buffer_dirty(bh) && buffer_mapped(bh)) {
626 if (type == XFS_IO_OVERWRITE)
630 /* If we are only checking the first buffer, we are done now. */
631 if (!check_all_buffers)
633 } while ((bh = bh->b_this_page) != head);
639 xfs_vm_invalidatepage(
644 trace_xfs_invalidatepage(page->mapping->host, page, offset,
646 block_invalidatepage(page, offset, length);
650 * If the page has delalloc buffers on it, we need to punch them out before we
651 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
652 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
653 * is done on that same region - the delalloc extent is returned when none is
654 * supposed to be there.
656 * We prevent this by truncating away the delalloc regions on the page before
657 * invalidating it. Because they are delalloc, we can do this without needing a
658 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
659 * truncation without a transaction as there is no space left for block
660 * reservation (typically why we see an ENOSPC in writeback).
662 * This is not a performance critical path, so for now just do the punching a
663 * buffer head at a time.
666 xfs_aops_discard_page(
669 struct inode *inode = page->mapping->host;
670 struct xfs_inode *ip = XFS_I(inode);
671 struct buffer_head *bh, *head;
672 loff_t offset = page_offset(page);
674 if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
677 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
680 xfs_alert(ip->i_mount,
681 "page discard on page %p, inode 0x%llx, offset %llu.",
682 page, ip->i_ino, offset);
684 xfs_ilock(ip, XFS_ILOCK_EXCL);
685 bh = head = page_buffers(page);
688 xfs_fileoff_t start_fsb;
690 if (!buffer_delay(bh))
693 start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
694 error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
696 /* something screwed, just bail */
697 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
698 xfs_alert(ip->i_mount,
699 "page discard unable to remove delalloc mapping.");
704 offset += 1 << inode->i_blkbits;
706 } while ((bh = bh->b_this_page) != head);
708 xfs_iunlock(ip, XFS_ILOCK_EXCL);
710 xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
715 * We implement an immediate ioend submission policy here to avoid needing to
716 * chain multiple ioends and hence nest mempool allocations which can violate
717 * forward progress guarantees we need to provide. The current ioend we are
718 * adding buffers to is cached on the writepage context, and if the new buffer
719 * does not append to the cached ioend, it will create a new ioend and cache that
722 * If a new ioend is created and cached, the old ioend is returned and queued
723 * locally for submission once the entire page is processed or an error has been
724 * detected. While ioends are submitted immediately after they are completed,
725 * batching optimisations are provided by higher level block plugging.
727 * At the end of a writeback pass, there will be a cached ioend remaining on the
728 * writepage context that the caller will need to submit.
732 struct xfs_writepage_ctx *wpc,
733 struct writeback_control *wbc,
737 __uint64_t end_offset)
739 LIST_HEAD(submit_list);
740 struct xfs_ioend *ioend, *next;
741 struct buffer_head *bh, *head;
742 ssize_t len = 1 << inode->i_blkbits;
747 bh = head = page_buffers(page);
748 offset = page_offset(page);
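/*
 * Walk the buffers on the page, building ioends for runs of contiguous
 * writeable buffers and queueing completed ioends on @submit_list.
 */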
750 if (offset >= end_offset)
752 if (!buffer_uptodate(bh))
756 * set_page_dirty dirties all buffers in a page, independent
757 * of their state. The dirty state however is entirely
758 * meaningless for holes (!mapped && uptodate), so skip
759 * buffers covering holes here.
761 if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
762 wpc->imap_valid = false;
766 if (buffer_unwritten(bh)) {
767 if (wpc->io_type != XFS_IO_UNWRITTEN) {
768 wpc->io_type = XFS_IO_UNWRITTEN;
769 wpc->imap_valid = false;
771 } else if (buffer_delay(bh)) {
772 if (wpc->io_type != XFS_IO_DELALLOC) {
773 wpc->io_type = XFS_IO_DELALLOC;
774 wpc->imap_valid = false;
776 } else if (buffer_uptodate(bh)) {
777 if (wpc->io_type != XFS_IO_OVERWRITE) {
778 wpc->io_type = XFS_IO_OVERWRITE;
779 wpc->imap_valid = false;
782 if (PageUptodate(page))
783 ASSERT(buffer_mapped(bh));
785 * This buffer is not uptodate and will not be
786 * written to disk. Ensure that we will put any
787 * subsequent writeable buffers into a new
790 wpc->imap_valid = false;
795 wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
797 if (!wpc->imap_valid) {
798 error = xfs_map_blocks(inode, offset, &wpc->imap,
802 wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
805 if (wpc->imap_valid) {
807 if (wpc->io_type != XFS_IO_OVERWRITE)
808 xfs_map_at_offset(inode, bh, &wpc->imap, offset);
809 xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
813 } while (offset += len, ((bh = bh->b_this_page) != head));
815 if (uptodate && bh == head)
816 SetPageUptodate(page);
818 ASSERT(wpc->ioend || list_empty(&submit_list));
822 * On error, we have to fail the ioend here because we have locked
823 * buffers in the ioend. If we don't do this, we'll deadlock
824 * invalidating the page as that tries to lock the buffers on the page.
825 * Also, because we may have set pages under writeback, we have to make
826 * sure we run IO completion to mark the error state of the IO
827 * appropriately, so we can't cancel the ioend directly here. That means
828 * we have to mark this page as under writeback if we included any
829 * buffers from it in the ioend chain so that completion treats it
832 * If we didn't include the page in the ioend, then on error we can
833 * simply discard and unlock it as there are no other users of the page
834 * or its buffers right now. The caller will still need to trigger
835 * submission of outstanding ioends on the writepage context so they are
836 * treated correctly on error.
839 xfs_start_page_writeback(page, !error);
842 * Preserve the original error if there was one; otherwise catch
843 * submission errors here and propagate into subsequent ioend
846 list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
849 list_del_init(&ioend->io_list);
850 error2 = xfs_submit_ioend(wbc, ioend, error);
851 if (error2 && !error)
855 xfs_aops_discard_page(page);
856 ClearPageUptodate(page);
860 * We can end up here with no error and nothing to write if we
861 * race with a partial page truncate on a sub-page block sized
862 * filesystem. In that case we need to mark the page clean.
864 xfs_start_page_writeback(page, 1);
865 end_page_writeback(page);
868 mapping_set_error(page->mapping, error);
873 * Write out a dirty page.
875 * For delalloc space on the page we need to allocate space and flush it.
876 * For unwritten space on the page we need to start the conversion to
877 * regular allocated space.
878 * For any other dirty buffer heads on the page we should flush them.
883 struct writeback_control *wbc,
886 struct xfs_writepage_ctx *wpc = data;
887 struct inode *inode = page->mapping->host;
889 __uint64_t end_offset;
892 trace_xfs_writepage(inode, page, 0, 0);
894 ASSERT(page_has_buffers(page));
897 * Refuse to write the page out if we are called from reclaim context.
899 * This avoids stack overflows when called from deeply used stacks in
900 * random callers for direct reclaim or memcg reclaim. We explicitly
901 * allow reclaim from kswapd as the stack usage there is relatively low.
903 * This should never happen except in the case of a VM regression so
906 if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
911 * Given that we do not allow direct reclaim to call us, we should
912 * never be called while in a filesystem transaction.
914 if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
918 * Is this page beyond the end of the file?
920 * The page index is less than the end_index, adjust the end_offset
921 * to the highest offset that this page should represent.
922 * -----------------------------------------------------
923 * | file mapping | <EOF> |
924 * -----------------------------------------------------
925 * | Page ... | Page N-2 | Page N-1 | Page N | |
926 * ^--------------------------------^----------|--------
927 * | desired writeback range | see else |
928 * ---------------------------------^------------------|
930 offset = i_size_read(inode);
931 end_index = offset >> PAGE_SHIFT;
932 if (page->index < end_index)
933 end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
936 * Check whether the page to write out is beyond or straddles
938 * -------------------------------------------------------
939 * | file mapping | <EOF> |
940 * -------------------------------------------------------
941 * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
942 * ^--------------------------------^-----------|---------
944 * ---------------------------------^-----------|--------|
946 unsigned offset_into_page = offset & (PAGE_SIZE - 1);
949 * Skip the page if it is fully outside i_size, e.g. due to a
950 * truncate operation that is in progress. We must redirty the
951 * page so that reclaim stops reclaiming it. Otherwise
952 * xfs_vm_releasepage() is called on it and gets confused.
954 * Note that end_index is an unsigned long; it would overflow
955 * if the given offset is greater than 16TB on a 32-bit system,
956 * and if we checked whether the page is fully outside i_size
957 * via "if (page->index >= end_index + 1)", then "end_index + 1"
958 * would evaluate to 0. Hence this page would be redirtied
959 * and written out repeatedly, which would result in an
960 * infinite loop; the user program performing this operation
961 * would hang. Instead, we can verify this situation by checking
962 * if the page to write is totally beyond i_size or if its
963 * offset is just equal to the EOF.
965 if (page->index > end_index ||
966 (page->index == end_index && offset_into_page == 0))
970 * The page straddles i_size. It must be zeroed out on each
971 * and every writepage invocation because it may be mmapped.
972 * "A file is mapped in multiples of the page size. For a file
973 * that is not a multiple of the page size, the remaining
974 * memory is zeroed when mapped, and writes to that region are
975 * not written out to the file."
977 zero_user_segment(page, offset_into_page, PAGE_SIZE);
979 /* Adjust the end_offset to the end of file */
983 return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);
986 redirty_page_for_writepage(wbc, page);
994 struct writeback_control *wbc)
996 struct xfs_writepage_ctx wpc = {
997 .io_type = XFS_IO_INVALID,
1001 ret = xfs_do_writepage(page, wbc, &wpc);
1003 ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
1009 struct address_space *mapping,
1010 struct writeback_control *wbc)
1012 struct xfs_writepage_ctx wpc = {
1013 .io_type = XFS_IO_INVALID,
1017 xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1018 if (dax_mapping(mapping))
1019 return dax_writeback_mapping_range(mapping,
1020 xfs_find_bdev_for_inode(mapping->host), wbc);
1022 ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
1024 ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
1029 * Called to move a page into cleanable state - and from there
1030 * to be released. The page should already be clean. We always
1031 * have buffer heads in this call.
1033 * Returns 1 if the page is ok to release, 0 otherwise.
1040 int delalloc, unwritten;
1042 trace_xfs_releasepage(page->mapping->host, page, 0, 0);
1044 xfs_count_page_state(page, &delalloc, &unwritten);
1046 if (WARN_ON_ONCE(delalloc))
1048 if (WARN_ON_ONCE(unwritten))
1051 return try_to_free_buffers(page);
1055 * When we map a DIO buffer, we may need to pass flags to
1056 * xfs_end_io_direct_write to tell it what kind of write IO we are doing.
1058 * Note that for DIO, an IO to the highest supported file block offset (i.e.
1059 * 2^63 - 1FSB bytes) will result in the offset + count overflowing a signed 64
1060 * bit variable. Hence if we see this overflow, we have to assume that the IO is
1061 * extending the file size. We won't know for sure until IO completion is run
1062 * and the actual max write offset is communicated to the IO completion
1067 struct inode *inode,
1068 struct buffer_head *bh_result,
1069 struct xfs_bmbt_irec *imap,
1072 uintptr_t *flags = (uintptr_t *)&bh_result->b_private;
1073 xfs_off_t size = bh_result->b_size;
1075 trace_xfs_get_blocks_map_direct(XFS_I(inode), offset, size,
1076 ISUNWRITTEN(imap) ? XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, imap);
1078 if (ISUNWRITTEN(imap)) {
1079 *flags |= XFS_DIO_FLAG_UNWRITTEN;
1080 set_buffer_defer_completion(bh_result);
1081 } else if (offset + size > i_size_read(inode) || offset + size < 0) {
1082 *flags |= XFS_DIO_FLAG_APPEND;
1083 set_buffer_defer_completion(bh_result);
1088 * If this is O_DIRECT or the mpage code calling, tell them how large the mapping
1089 * is so that we can avoid repeated get_blocks calls.
1091 * If the mapping spans EOF, then we have to break the mapping up as the mapping
1092 * for blocks beyond EOF must be marked new so that sub block regions can be
1093 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
1094 * was just allocated or is unwritten, otherwise the callers would overwrite
1095 * existing data with zeros. Hence we have to split the mapping into a range up
1096 * to and including EOF, and a second mapping for beyond EOF.
1100 struct inode *inode,
1102 struct buffer_head *bh_result,
1103 struct xfs_bmbt_irec *imap,
1107 xfs_off_t mapping_size;
1109 mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
1110 mapping_size <<= inode->i_blkbits;
1112 ASSERT(mapping_size > 0);
1113 if (mapping_size > size)
1114 mapping_size = size;
1115 if (offset < i_size_read(inode) &&
1116 offset + mapping_size >= i_size_read(inode)) {
1117 /* limit mapping to block that spans EOF */
1118 mapping_size = roundup_64(i_size_read(inode) - offset,
1119 1 << inode->i_blkbits);
1121 if (mapping_size > LONG_MAX)
1122 mapping_size = LONG_MAX;
1124 bh_result->b_size = mapping_size;
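/*
 * Shared implementation behind the buffered, direct and DAX get_blocks
 * callbacks: map existing blocks for @iblock, or allocate new ones when
 * @create is set.
 */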
1129 struct inode *inode,
1131 struct buffer_head *bh_result,
1136 struct xfs_inode *ip = XFS_I(inode);
1137 struct xfs_mount *mp = ip->i_mount;
1138 xfs_fileoff_t offset_fsb, end_fsb;
1141 struct xfs_bmbt_irec imap;
1147 if (XFS_FORCED_SHUTDOWN(mp))
1150 offset = (xfs_off_t)iblock << inode->i_blkbits;
1151 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1152 size = bh_result->b_size;
1154 if (!create && direct && offset >= i_size_read(inode))
1158 * Direct I/O is usually done on preallocated files, so try getting
1159 * a block mapping without an exclusive lock first. For buffered
1160 * writes we already have the exclusive iolock anyway, so avoiding
1161 * a lock roundtrip here by taking the ilock exclusive from the
1162 * beginning is a useful micro optimization.
1164 if (create && !direct) {
1165 lockmode = XFS_ILOCK_EXCL;
1166 xfs_ilock(ip, lockmode);
1168 lockmode = xfs_ilock_data_map_shared(ip);
1171 ASSERT(offset <= mp->m_super->s_maxbytes);
1172 if (offset + size > mp->m_super->s_maxbytes)
1173 size = mp->m_super->s_maxbytes - offset;
1174 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
1175 offset_fsb = XFS_B_TO_FSBT(mp, offset);
1177 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
1178 &imap, &nimaps, XFS_BMAPI_ENTIRE);
1182 /* for DAX, we convert unwritten extents directly */
1185 (imap.br_startblock == HOLESTARTBLOCK ||
1186 imap.br_startblock == DELAYSTARTBLOCK) ||
1187 (IS_DAX(inode) && ISUNWRITTEN(&imap)))) {
1188 if (direct || xfs_get_extsz_hint(ip)) {
1190 * xfs_iomap_write_direct() expects the shared lock. It
1191 * is unlocked on return.
1193 if (lockmode == XFS_ILOCK_EXCL)
1194 xfs_ilock_demote(ip, lockmode);
1196 error = xfs_iomap_write_direct(ip, offset, size,
1204 * Delalloc reservations do not require a transaction, so
1205 * we can go on without dropping the lock here. If we
1206 * are allocating a new delalloc block, make sure that
1207 * we set the new flag so that the buffer is marked new and
1208 * we know that it is newly allocated if the write
1211 if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
1213 error = xfs_iomap_write_delay(ip, offset, size, &imap);
1217 xfs_iunlock(ip, lockmode);
1219 trace_xfs_get_blocks_alloc(ip, offset, size,
1220 ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
1221 : XFS_IO_DELALLOC, &imap);
1222 } else if (nimaps) {
1223 trace_xfs_get_blocks_found(ip, offset, size,
1224 ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
1225 : XFS_IO_OVERWRITE, &imap);
1226 xfs_iunlock(ip, lockmode);
1228 trace_xfs_get_blocks_notfound(ip, offset, size);
1232 if (IS_DAX(inode) && create) {
1233 ASSERT(!ISUNWRITTEN(&imap));
1234 /* zeroing is not needed at a higher layer */
1238 /* trim mapping down to size requested */
1239 if (direct || size > (1 << inode->i_blkbits))
1240 xfs_map_trim_size(inode, iblock, bh_result,
1241 &imap, offset, size);
1244 * For unwritten extents do not report a disk address in the buffered
1245 * read case (treat as if we're reading into a hole).
1247 if (imap.br_startblock != HOLESTARTBLOCK &&
1248 imap.br_startblock != DELAYSTARTBLOCK &&
1249 (create || !ISUNWRITTEN(&imap))) {
1250 xfs_map_buffer(inode, bh_result, &imap, offset);
1251 if (ISUNWRITTEN(&imap))
1252 set_buffer_unwritten(bh_result);
1253 /* direct IO needs special help */
1254 if (create && direct) {
1256 ASSERT(!ISUNWRITTEN(&imap));
1258 xfs_map_direct(inode, bh_result, &imap, offset);
1263 * If this is a realtime file, data may be on a different device
1264 * to that pointed to by the buffer_head's b_bdev currently.
1266 bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
1269 * If we previously allocated a block out beyond eof and we are now
1270 * coming back to use it then we will need to flag it as new even if it
1271 * has a disk address.
1273 * With sub-block writes into unwritten extents we also need to mark
1274 * the buffer as new so that the unwritten parts of the buffer get
1278 ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
1279 (offset >= i_size_read(inode)) ||
1280 (new || ISUNWRITTEN(&imap))))
1281 set_buffer_new(bh_result);
1283 if (imap.br_startblock == DELAYSTARTBLOCK) {
1286 set_buffer_uptodate(bh_result);
1287 set_buffer_mapped(bh_result);
1288 set_buffer_delay(bh_result);
1295 xfs_iunlock(ip, lockmode);
1301 struct inode *inode,
1303 struct buffer_head *bh_result,
1306 return __xfs_get_blocks(inode, iblock, bh_result, create, false, false);
1310 xfs_get_blocks_direct(
1311 struct inode *inode,
1313 struct buffer_head *bh_result,
1316 return __xfs_get_blocks(inode, iblock, bh_result, create, true, false);
1320 xfs_get_blocks_dax_fault(
1321 struct inode *inode,
1323 struct buffer_head *bh_result,
1326 return __xfs_get_blocks(inode, iblock, bh_result, create, true, true);
1330 * Complete a direct I/O write request.
1332 * xfs_map_direct passes us some flags in the private data to tell us what to
1333 * do. If no flags are set, then the write IO is an overwrite wholly within
1334 * the existing allocated file size and so there is nothing for us to do.
1336 * Note that in this case the completion can be called in interrupt context,
1337 * whereas if we have flags set we will always be called in task context
1338 * (i.e. from a workqueue).
1341 xfs_end_io_direct_write(
1347 struct inode *inode = file_inode(iocb->ki_filp);
1348 struct xfs_inode *ip = XFS_I(inode);
1349 struct xfs_mount *mp = ip->i_mount;
1350 uintptr_t flags = (uintptr_t)private;
1353 trace_xfs_end_io_direct_write(ip, offset, size);
1355 if (XFS_FORCED_SHUTDOWN(mp))
1362 * The flags tell us whether we are doing unwritten extent conversions
1363 * or an append transaction that updates the on-disk file size. These
1364 * cases are the only cases where we should *potentially* be needing
1365 * to update the VFS inode size.
1368 ASSERT(offset + size <= i_size_read(inode));
1373 * We need to update the in-core inode size here so that we don't end up
1374 * with the on-disk inode size being outside the in-core inode size. We
1375 * have no other method of updating EOF for AIO, so always do it here
1378 * We need to lock the test/set EOF update as we can be racing with
1379 * other IO completions here to update the EOF. Failing to serialise
1380 * here can result in EOF moving backwards and Bad Things Happen when
1383 spin_lock(&ip->i_flags_lock);
1384 if (offset + size > i_size_read(inode))
1385 i_size_write(inode, offset + size);
1386 spin_unlock(&ip->i_flags_lock);
1388 if (flags & XFS_DIO_FLAG_UNWRITTEN) {
1389 trace_xfs_end_io_direct_write_unwritten(ip, offset, size);
1391 error = xfs_iomap_write_unwritten(ip, offset, size);
1392 } else if (flags & XFS_DIO_FLAG_APPEND) {
1393 struct xfs_trans *tp;
1395 trace_xfs_end_io_direct_write_append(ip, offset, size);
1397 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0,
1400 error = xfs_setfilesize(ip, tp, offset, size);
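/*
 * Issue direct I/O via the DAX or blockdev path, wiring up the write
 * completion handler for writes.
 */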
1409 struct iov_iter *iter)
1411 struct inode *inode = iocb->ki_filp->f_mapping->host;
1412 dio_iodone_t *endio = NULL;
1414 struct block_device *bdev;
1416 if (iov_iter_rw(iter) == WRITE) {
1417 endio = xfs_end_io_direct_write;
1418 flags = DIO_ASYNC_EXTEND;
1421 if (IS_DAX(inode)) {
1422 return dax_do_io(iocb, inode, iter,
1423 xfs_get_blocks_direct, endio, 0);
1426 bdev = xfs_find_bdev_for_inode(inode);
1427 return __blockdev_direct_IO(iocb, inode, bdev, iter,
1428 xfs_get_blocks_direct, endio, NULL, flags);
1432 * Punch out the delalloc blocks we have already allocated.
1434 * Don't bother with xfs_setattr given that nothing can have made it to disk yet
1435 * as the page is still locked at this point.
1438 xfs_vm_kill_delalloc_range(
1439 struct inode *inode,
1443 struct xfs_inode *ip = XFS_I(inode);
1444 xfs_fileoff_t start_fsb;
1445 xfs_fileoff_t end_fsb;
1448 start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
1449 end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
1450 if (end_fsb <= start_fsb)
1453 xfs_ilock(ip, XFS_ILOCK_EXCL);
1454 error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
1455 end_fsb - start_fsb);
1457 /* something screwed, just bail */
1458 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1459 xfs_alert(ip->i_mount,
1460 "xfs_vm_write_failed: unable to clean up ino %lld",
1464 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1468 xfs_vm_write_failed(
1469 struct inode *inode,
1474 loff_t block_offset;
1477 loff_t from = pos & (PAGE_SIZE - 1);
1478 loff_t to = from + len;
1479 struct buffer_head *bh, *head;
1480 struct xfs_mount *mp = XFS_I(inode)->i_mount;
1483 * The request pos offset might be 32 or 64 bit; this is all fine
1484 * on 64-bit platform. However, for 64-bit pos request on 32-bit
1485 * platform, the high 32-bit will be masked off if we evaluate the
1486 * block_offset via (pos & PAGE_MASK) because the PAGE_MASK is
1487 * 0xfffff000 as an unsigned long, hence the result is incorrect
1488 * which could cause the following ASSERT to fail in most cases.
1489 * In order to avoid this, we can evaluate the block_offset of the
1490 * start of the page by using shifts rather than masks, avoiding the mismatch
1493 block_offset = (pos >> PAGE_SHIFT) << PAGE_SHIFT;
1495 ASSERT(block_offset + from == pos);
1497 head = page_buffers(page);
1499 for (bh = head; bh != head || !block_start;
1500 bh = bh->b_this_page, block_start = block_end,
1501 block_offset += bh->b_size) {
1502 block_end = block_start + bh->b_size;
1504 /* skip buffers before the write */
1505 if (block_end <= from)
1508 /* if the buffer is after the write, we're done */
1509 if (block_start >= to)
1513 * Process delalloc and unwritten buffers beyond EOF. We can
1514 * encounter unwritten buffers in the event that a file has
1515 * post-EOF unwritten extents and an extending write happens to
1516 * fail (e.g., an unaligned write that also involves a delalloc
1517 * to the same page).
1519 if (!buffer_delay(bh) && !buffer_unwritten(bh))
1522 if (!xfs_mp_fail_writes(mp) && !buffer_new(bh) &&
1523 block_offset < i_size_read(inode))
1526 if (buffer_delay(bh))
1527 xfs_vm_kill_delalloc_range(inode, block_offset,
1528 block_offset + bh->b_size);
1531 * This buffer does not contain data anymore. Make sure anyone
1532 * who finds it knows that for certain.
1534 clear_buffer_delay(bh);
1535 clear_buffer_uptodate(bh);
1536 clear_buffer_mapped(bh);
1537 clear_buffer_new(bh);
1538 clear_buffer_dirty(bh);
1539 clear_buffer_unwritten(bh);
1545 * This used to call block_write_begin(), but it unlocks and releases the page
1546 * on error, and we need that page to be able to punch stale delalloc blocks out
1547 * on failure. Hence we copy-n-waste it here and call xfs_vm_write_failed() at
1548 * the appropriate point.
1553 struct address_space *mapping,
1557 struct page **pagep,
1560 pgoff_t index = pos >> PAGE_SHIFT;
1563 struct xfs_mount *mp = XFS_I(mapping->host)->i_mount;
1565 ASSERT(len <= PAGE_SIZE);
1567 page = grab_cache_page_write_begin(mapping, index, flags);
1571 status = __block_write_begin(page, pos, len, xfs_get_blocks);
1572 if (xfs_mp_fail_writes(mp))
1574 if (unlikely(status)) {
1575 struct inode *inode = mapping->host;
1576 size_t isize = i_size_read(inode);
1578 xfs_vm_write_failed(inode, page, pos, len);
1582 * If the write is beyond EOF, we only want to kill blocks
1583 * allocated in this write, not blocks that were previously
1584 * written successfully.
1586 if (xfs_mp_fail_writes(mp))
1588 if (pos + len > isize) {
1589 ssize_t start = max_t(ssize_t, pos, isize);
1591 truncate_pagecache_range(inode, start, pos + len);
1603 * On failure, we only need to kill delalloc blocks beyond EOF in the range of
1604 * this specific write because they will never be written. Previous writes
1605 * beyond EOF where block allocation succeeded do not need to be trashed, so
1606 * only new blocks from this write should be trashed. For blocks within
1607 * EOF, generic_write_end() zeros them so they are safe to leave alone and be
1608 * written with all the other valid data.
1613 struct address_space *mapping,
1622 ASSERT(len <= PAGE_SIZE);
1624 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
1625 if (unlikely(ret < len)) {
1626 struct inode *inode = mapping->host;
1627 size_t isize = i_size_read(inode);
1628 loff_t to = pos + len;
1631 /* only kill blocks in this write beyond EOF */
1634 xfs_vm_kill_delalloc_range(inode, isize, to);
1635 truncate_pagecache_range(inode, isize, to);
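/*
 * Flush and wait for dirty data before handing the block lookup off to the
 * generic bmap helper.
 */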
1643 struct address_space *mapping,
1646 struct inode *inode = (struct inode *)mapping->host;
1647 struct xfs_inode *ip = XFS_I(inode);
1649 trace_xfs_vm_bmap(XFS_I(inode));
1650 xfs_ilock(ip, XFS_IOLOCK_SHARED);
1651 filemap_write_and_wait(mapping);
1652 xfs_iunlock(ip, XFS_IOLOCK_SHARED);
1653 return generic_block_bmap(mapping, block, xfs_get_blocks);
1658 struct file *unused,
1661 trace_xfs_vm_readpage(page->mapping->host, 1);
1662 return mpage_readpage(page, xfs_get_blocks);
1667 struct file *unused,
1668 struct address_space *mapping,
1669 struct list_head *pages,
1672 trace_xfs_vm_readpages(mapping->host, nr_pages);
1673 return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
1677 * This is basically a copy of __set_page_dirty_buffers() with one
1678 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
1679 * dirty, we'll never be able to clean them because we don't write buffers
1680 * beyond EOF, and that means we can't invalidate pages that span EOF
1681 * that have been marked dirty. Further, the dirty state can leak into
1682 * the file interior if the file is extended, resulting in all sorts of
1683 * bad things happening as the state does not match the underlying data.
1685 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
1686 * this only exist because of bufferheads and how the generic code manages them.
1689 xfs_vm_set_page_dirty(
1692 struct address_space *mapping = page->mapping;
1693 struct inode *inode = mapping->host;
1698 if (unlikely(!mapping))
1699 return !TestSetPageDirty(page);
1701 end_offset = i_size_read(inode);
1702 offset = page_offset(page);
1704 spin_lock(&mapping->private_lock);
1705 if (page_has_buffers(page)) {
1706 struct buffer_head *head = page_buffers(page);
1707 struct buffer_head *bh = head;
1710 if (offset < end_offset)
1711 set_buffer_dirty(bh);
1712 bh = bh->b_this_page;
1713 offset += 1 << inode->i_blkbits;
1714 } while (bh != head);
1717 * Lock out page->mem_cgroup migration to keep PageDirty
1718 * synchronized with per-memcg dirty page counters.
1720 lock_page_memcg(page);
1721 newly_dirty = !TestSetPageDirty(page);
1722 spin_unlock(&mapping->private_lock);
1725 /* sigh - __set_page_dirty() is static, so copy it here, too */
1726 unsigned long flags;
1728 spin_lock_irqsave(&mapping->tree_lock, flags);
1729 if (page->mapping) { /* Race with truncate? */
1730 WARN_ON_ONCE(!PageUptodate(page));
1731 account_page_dirtied(page, mapping);
1732 radix_tree_tag_set(&mapping->page_tree,
1733 page_index(page), PAGECACHE_TAG_DIRTY);
1735 spin_unlock_irqrestore(&mapping->tree_lock, flags);
1737 unlock_page_memcg(page);
1739 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1743 const struct address_space_operations xfs_address_space_operations = {
1744 .readpage = xfs_vm_readpage,
1745 .readpages = xfs_vm_readpages,
1746 .writepage = xfs_vm_writepage,
1747 .writepages = xfs_vm_writepages,
1748 .set_page_dirty = xfs_vm_set_page_dirty,
1749 .releasepage = xfs_vm_releasepage,
1750 .invalidatepage = xfs_vm_invalidatepage,
1751 .write_begin = xfs_vm_write_begin,
1752 .write_end = xfs_vm_write_end,
1753 .bmap = xfs_vm_bmap,
1754 .direct_IO = xfs_vm_direct_IO,
1755 .migratepage = buffer_migrate_page,
1756 .is_partially_uptodate = block_is_partially_uptodate,
1757 .error_remove_page = generic_error_remove_page,