/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c.
 *
 * The intent is for the ext4_mpage_readpages() function here to
 * replace mpage_readpages() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_page(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 *
 */
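
/*
 * For reference, a sketch of how the caller side looks, modeled on
 * ext4_readpages() in fs/ext4/inode.c (an illustration of the calling
 * convention, not a verbatim copy):
 *
 *      static int ext4_readpages(struct file *file,
 *                                struct address_space *mapping,
 *                                struct list_head *pages, unsigned nr_pages)
 *      {
 *              struct inode *inode = mapping->host;
 *
 *              if (ext4_has_inline_data(inode))
 *                      return 0;
 *
 *              return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
 *      }
 */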

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"

/*
 * Call ext4_decrypt on every single page, reusing the encryption
 * context.
 */
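/*
 * Note: this runs as a work item on ext4_read_workqueue, i.e. in
 * process context, because the decryption work may sleep and so
 * cannot be done from the atomic bio completion path (see
 * mpage_end_io() below).
 */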
static void completion_pages(struct work_struct *work)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
        struct ext4_crypto_ctx *ctx =
                container_of(work, struct ext4_crypto_ctx, r.work);
        struct bio      *bio    = ctx->r.bio;
        struct bio_vec  *bv;
        int             i;

        bio_for_each_segment_all(bv, bio, i) {
                struct page *page = bv->bv_page;

                int ret = ext4_decrypt(page);
                if (ret) {
                        WARN_ON_ONCE(1);
                        SetPageError(page);
                } else
                        SetPageUptodate(page);
                unlock_page(page);
        }
        ext4_release_crypto_ctx(ctx);
        bio_put(bio);
#else
        BUG();
#endif
}

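/*
 * bi_private carries the ext4_crypto_ctx that ext4_mpage_readpages()
 * attached to the bio, so a non-NULL value identifies a read whose
 * pages still need to be decrypted.
 */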
static inline bool ext4_bio_encrypted(struct bio *bio)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
        return unlikely(bio->bi_private != NULL);
#else
        return false;
#endif
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
        struct bio_vec *bv;
        int i;

        if (ext4_bio_encrypted(bio)) {
                struct ext4_crypto_ctx *ctx = bio->bi_private;

                if (bio->bi_error) {
                        ext4_release_crypto_ctx(ctx);
                } else {
                        INIT_WORK(&ctx->r.work, completion_pages);
                        ctx->r.bio = bio;
                        queue_work(ext4_read_workqueue, &ctx->r.work);
                        return;
                }
        }
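        /*
         * On an encrypted bio that failed, the ctx was released above
         * and we fall through so the loop below flags each page with
         * an error.
         */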
        bio_for_each_segment_all(bv, bio, i) {
                struct page *page = bv->bv_page;

                if (!bio->bi_error) {
                        SetPageUptodate(page);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
                unlock_page(page);
        }

        bio_put(bio);
}

int ext4_mpage_readpages(struct address_space *mapping,
                         struct list_head *pages, struct page *page,
                         unsigned nr_pages)
{
        struct bio *bio = NULL;
        unsigned page_idx;
        sector_t last_block_in_bio = 0;

        struct inode *inode = mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
        const unsigned blocksize = 1 << blkbits;
        sector_t block_in_file;
        sector_t last_block;
        sector_t last_block_in_file;
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        struct block_device *bdev = inode->i_sb->s_bdev;
        int length;
        unsigned relative_block = 0;
        struct ext4_map_blocks map;

        map.m_pblk = 0;
        map.m_lblk = 0;
        map.m_len = 0;
        map.m_flags = 0;

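        /*
         * The map is deliberately kept across loop iterations: one
         * ext4_map_blocks() call can describe an extent spanning many
         * pages, and the cached result is reused below until it runs
         * out.
         */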
        for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
                int fully_mapped = 1;
                unsigned first_hole = blocks_per_page;

                if (pages) {
                        page = list_entry(pages->prev, struct page, lru);
                        list_del(&page->lru);
                        if (add_to_page_cache_lru(page, mapping, page->index,
                                        GFP_KERNEL & mapping_gfp_mask(mapping)))
                                goto next_page;
                }
                /* Prefetch only after "page" is known to be valid. */
                prefetchw(&page->flags);

                if (page_has_buffers(page))
                        goto confused;

                block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
                last_block = block_in_file + nr_pages * blocks_per_page;
                last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
                if (last_block > last_block_in_file)
                        last_block = last_block_in_file;
                page_block = 0;

                /*
                 * Map blocks using the previous result first.
                 */
                if ((map.m_flags & EXT4_MAP_MAPPED) &&
                    block_in_file > map.m_lblk &&
                    block_in_file < (map.m_lblk + map.m_len)) {
                        unsigned map_offset = block_in_file - map.m_lblk;
                        unsigned last = map.m_len - map_offset;

                        for (relative_block = 0; ; relative_block++) {
                                if (relative_block == last) {
                                        /* needed? */
                                        map.m_flags &= ~EXT4_MAP_MAPPED;
                                        break;
                                }
                                if (page_block == blocks_per_page)
                                        break;
                                blocks[page_block] = map.m_pblk + map_offset +
                                        relative_block;
                                page_block++;
                                block_in_file++;
                        }
                }
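                /*
                 * Worked example (hypothetical numbers): with 1k blocks
                 * and 4k pages (blocks_per_page == 4), if the previous
                 * lookup returned m_lblk == 4, m_len == 8, then a page
                 * starting at block_in_file == 6 lies inside that extent,
                 * so blocks 6..9 are filled in above from m_pblk without
                 * another ext4_map_blocks() call.
                 */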

                /*
                 * Then do more ext4_map_blocks() calls until we are
                 * done with this page.
                 */
                while (page_block < blocks_per_page) {
                        if (block_in_file < last_block) {
                                map.m_lblk = block_in_file;
                                map.m_len = last_block - block_in_file;

                                if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
                                set_error_page:
                                        SetPageError(page);
                                        zero_user_segment(page, 0,
                                                          PAGE_CACHE_SIZE);
                                        unlock_page(page);
                                        goto next_page;
                                }
                        }
                        if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
                                fully_mapped = 0;
                                if (first_hole == blocks_per_page)
                                        first_hole = page_block;
                                page_block++;
                                block_in_file++;
                                continue;
                        }
                        if (first_hole != blocks_per_page)
                                goto confused;          /* hole -> non-hole */

                        /* Contiguous blocks? */
                        if (page_block && blocks[page_block-1] != map.m_pblk-1)
                                goto confused;
                        for (relative_block = 0; ; relative_block++) {
                                if (relative_block == map.m_len) {
                                        /* needed? */
                                        map.m_flags &= ~EXT4_MAP_MAPPED;
                                        break;
                                } else if (page_block == blocks_per_page)
                                        break;
                                blocks[page_block] = map.m_pblk+relative_block;
                                page_block++;
                                block_in_file++;
                        }
                }
                if (first_hole != blocks_per_page) {
                        zero_user_segment(page, first_hole << blkbits,
                                          PAGE_CACHE_SIZE);
                        if (first_hole == 0) {
                                SetPageUptodate(page);
                                unlock_page(page);
                                goto next_page;
                        }
                } else if (fully_mapped) {
                        SetPageMappedToDisk(page);
                }
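                /*
                 * For a fully mapped single-block page, try cleancache
                 * first; on a hit the page is already populated, and the
                 * "confused" path below just unlocks it.
                 */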
                if (fully_mapped && blocks_per_page == 1 &&
                    !PageUptodate(page) && cleancache_get_page(page) == 0) {
                        SetPageUptodate(page);
                        goto confused;
                }

                /*
                 * This page will go to BIO.  Do we need to send this
                 * BIO off first?
                 */
                if (bio && (last_block_in_bio != blocks[0] - 1)) {
                submit_and_realloc:
                        submit_bio(READ, bio);
                        bio = NULL;
                }
                if (bio == NULL) {
                        struct ext4_crypto_ctx *ctx = NULL;

                        if (ext4_encrypted_inode(inode) &&
                            S_ISREG(inode->i_mode)) {
                                ctx = ext4_get_crypto_ctx(inode);
                                if (IS_ERR(ctx))
                                        goto set_error_page;
                        }
                        bio = bio_alloc(GFP_KERNEL,
                                min_t(int, nr_pages, BIO_MAX_PAGES));
                        if (!bio) {
                                if (ctx)
                                        ext4_release_crypto_ctx(ctx);
                                goto set_error_page;
                        }
                        bio->bi_bdev = bdev;
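                        /*
                         * blocks[] is in filesystem blocks; shifting by
                         * (blkbits - 9) converts to the 512-byte sectors
                         * the block layer expects.
                         */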
                        bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
                        bio->bi_end_io = mpage_end_io;
                        bio->bi_private = ctx;
                }

                length = first_hole << blkbits;
                if (bio_add_page(bio, page, length, 0) < length)
                        goto submit_and_realloc;

                if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
                     (relative_block == map.m_len)) ||
                    (first_hole != blocks_per_page)) {
                        submit_bio(READ, bio);
                        bio = NULL;
                } else
                        last_block_in_bio = blocks[blocks_per_page - 1];
                goto next_page;
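        /*
         * The fallback path: flush any bio built up so far, then let the
         * buffer_head-based block_read_full_page() handle the page (or
         * just unlock it if cleancache already filled it above).
         */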
        confused:
                if (bio) {
                        submit_bio(READ, bio);
                        bio = NULL;
                }
                if (!PageUptodate(page))
                        block_read_full_page(page, ext4_get_block);
                else
                        unlock_page(page);
        next_page:
                if (pages)
                        page_cache_release(page);
        }
        BUG_ON(pages && !list_empty(pages));
        if (bio)
                submit_bio(READ, bio);
        return 0;
}