/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The intent is that ext4_mpage_readpages() will replace
 * mpage_readpages() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_page(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 */
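
/*
 * A simplified sketch of how callers are expected to use this (the real
 * ext4_readpage()/ext4_readpages() in fs/ext4/inode.c also handle inline
 * data and tracing, so treat this as illustrative, not authoritative):
 *
 *	static int ext4_readpage(struct file *file, struct page *page)
 *	{
 *		return ext4_mpage_readpages(page->mapping, NULL, page, 1);
 *	}
 *
 *	static int ext4_readpages(struct file *file,
 *				  struct address_space *mapping,
 *				  struct list_head *pages, unsigned nr_pages)
 *	{
 *		return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
 *	}
 */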

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio, int err)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

int ext4_mpage_readpages(struct address_space *mapping,
			 struct list_head *pages, struct page *page,
			 unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;

	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
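
	/*
	 * Note: the map is deliberately carried from one loop iteration
	 * to the next, so one ext4_map_blocks() result that covers
	 * several pages is reused instead of being looked up again.
	 */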

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index, GFP_KERNEL))
				goto next_page;
		}
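
		/*
		 * A page that already has buffers attached is one of the
		 * "unusual" cases from the header comment: punt it to the
		 * buffer_head-based path below.
		 */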
		if (page_has_buffers(page))
			goto confused;
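
		/*
		 * Work out this page's extent in file-block units, and
		 * clamp it to i_size so that blocks past EOF are treated
		 * as holes instead of being read from disk.
		 */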
		block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_CACHE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block - 1] != map.m_pblk - 1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + relative_block;
				page_block++;
				block_in_file++;
			}
		}
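
		/*
		 * Zero the tail of the page if it ends in a hole; a page
		 * that is one big hole is uptodate without any I/O at all
		 * (the "holes at the end" case from the header comment).
		 */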
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_CACHE_SIZE);
			if (first_hole == 0) {
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
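
		/*
		 * For a fully mapped single-block page, try cleancache
		 * first: a hit fills the page without touching the block
		 * device, and the "confused" path then merely unlocks the
		 * already-uptodate page.
		 */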
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1)) {
		submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
			if (!bio)
				goto set_error_page;
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
		}
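
		/*
		 * Only the non-hole prefix of the page goes into the bio
		 * (bi_sector above is in 512-byte units, hence the
		 * blkbits - 9 shift); the zeroed tail must not be read.
		 */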
		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;
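
		/*
		 * Submit the bio now if this extent ends on a block
		 * boundary (the next mapping needs a metadata read first)
		 * or if the page has a trailing hole, since the following
		 * page can never be merged into this bio anyway.
		 */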
		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(READ, bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
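
		/*
		 * Slow path: flush any bio in progress, then let the
		 * buffer_head code read (or just unlock) this page.
		 */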
	confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}