/*
 *  linux/fs/ext4/file.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/file.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  ext4 fs regular file handling primitives
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/path.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/pagevec.h>
#include <linux/uio.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
                        (atomic_read(&inode->i_writecount) == 1) &&
                        !EXT4_I(inode)->i_reserved_data_blocks)
        {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

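/*
 * Wait until all pending conversions of unwritten extents on this inode
 * have completed, i.e. until EXT4_I(inode)->i_unwritten drops to zero.
 */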
static void ext4_unwritten_wait(struct inode *inode)
{
        wait_queue_head_t *wq = ext4_ioend_wq(inode);

        wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0));
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete.  Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block.  If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static int
ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        int blockmask = sb->s_blocksize - 1;

        if (pos >= i_size_read(inode))
                return 0;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return 1;

        return 0;
}

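/*
 * Write path for regular files.  Unaligned direct AIO and O_APPEND direct
 * AIO on extent-mapped files are serialized with the per-inode aio mutex
 * after waiting for pending unwritten-extent conversions.  Direct writes
 * that land entirely within already mapped, initialized blocks are flagged
 * as overwrites through iocb->private for the direct IO code.
 */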
static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(iocb->ki_filp);
        struct mutex *aio_mutex = NULL;
        struct blk_plug plug;
        int o_direct = iocb->ki_flags & IOCB_DIRECT;
        int overwrite = 0;
        ssize_t ret;

        /*
         * Unaligned direct AIO must be serialized; see the comment above
         * ext4_unaligned_aio().  In the case of O_APPEND, assume that we
         * must always serialize.
         */
        if (o_direct &&
            ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) &&
            !is_sync_kiocb(iocb) &&
            (iocb->ki_flags & IOCB_APPEND ||
             ext4_unaligned_aio(inode, from, iocb->ki_pos))) {
                aio_mutex = ext4_aio_mutex(inode);
                mutex_lock(aio_mutex);
                ext4_unwritten_wait(inode);
        }

        mutex_lock(&inode->i_mutex);
        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if (iocb->ki_pos >= sbi->s_bitmap_maxbytes) {
                        ret = -EFBIG;
                        goto out;
                }
                iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }

        iocb->private = &overwrite;
        if (o_direct) {
                size_t length = iov_iter_count(from);
                loff_t pos = iocb->ki_pos;
                blk_start_plug(&plug);

                /* check whether we do a DIO overwrite or not */
                if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
                    !file->f_mapping->nrpages && pos + length <= i_size_read(inode)) {
                        struct ext4_map_blocks map;
                        unsigned int blkbits = inode->i_blkbits;
                        int err, len;

                        map.m_lblk = pos >> blkbits;
                        map.m_len = (EXT4_BLOCK_ALIGN(pos + length, blkbits) >> blkbits)
                                - map.m_lblk;
                        len = map.m_len;

                        err = ext4_map_blocks(NULL, inode, &map, 0);
                        /*
                         * 'err == len' means that all of the blocks have
                         * been preallocated, whether or not they are
                         * initialized.  To exclude unwritten extents we
                         * also need to check m_flags.  There are two
                         * conditions that indicate initialized extents:
                         * 1) if we hit the extent cache, the
                         * EXT4_MAP_MAPPED flag is returned; 2) if we do
                         * a real lookup, no flags are returned.  So we
                         * should check both conditions.
                         */
                        if (err == len && (map.m_flags & EXT4_MAP_MAPPED))
                                overwrite = 1;
                }
        }

        ret = __generic_file_write_iter(iocb, from);
        mutex_unlock(&inode->i_mutex);

        if (ret > 0) {
                ssize_t err;

                err = generic_write_sync(file, iocb->ki_pos - ret, ret);
                if (err < 0)
                        ret = err;
        }
        if (o_direct)
                blk_finish_plug(&plug);

        if (aio_mutex)
                mutex_unlock(aio_mutex);
        return ret;

out:
        mutex_unlock(&inode->i_mutex);
        if (aio_mutex)
                mutex_unlock(aio_mutex);
        return ret;
}

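/*
 * DAX files bypass the page cache, so they get their own fault handlers:
 * blocks are mapped with ext4_get_block_write(), and any unwritten extents
 * are converted to written by the ext4_end_io_unwritten() completion
 * callback once the data has been written.
 */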
#ifdef CONFIG_FS_DAX
static void ext4_end_io_unwritten(struct buffer_head *bh, int uptodate)
{
        struct inode *inode = bh->b_assoc_map->host;
        /* XXX: breaks on 32-bit > 16TB. Is that even supported? */
        loff_t offset = (loff_t)(uintptr_t)bh->b_private << inode->i_blkbits;
        int err;
        if (!uptodate)
                return;
        WARN_ON(!buffer_unwritten(bh));
        err = ext4_convert_unwritten_extents(NULL, inode, offset, bh->b_size);
}

static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return dax_fault(vma, vmf, ext4_get_block_write, ext4_end_io_unwritten);
}

static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                                                pmd_t *pmd, unsigned int flags)
{
        return dax_pmd_fault(vma, addr, pmd, flags, ext4_get_block_write,
                                ext4_end_io_unwritten);
}

static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return dax_mkwrite(vma, vmf, ext4_get_block_write,
                                ext4_end_io_unwritten);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .pmd_fault      = ext4_dax_pmd_fault,
        .page_mkwrite   = ext4_dax_mkwrite,
        .pfn_mkwrite    = dax_pfn_mkwrite,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
};

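/*
 * mmap() for regular files: encrypted inodes need their key available
 * before they can be mapped, and DAX inodes get the DAX vm_ops (with huge
 * page support) instead of the page-cache based ones.
 */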
static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;

        if (ext4_encrypted_inode(inode)) {
                int err = ext4_get_encryption_info(inode);
                if (err)
                        return 0;
                if (ext4_encryption_info(inode) == NULL)
                        return -ENOKEY;
        }
        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}

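/*
 * open() for regular files: on the first open of a writable mount, record
 * the mount point in the superblock's s_last_mounted field; make sure the
 * encryption key is available for encrypted inodes; attach the jbd2 inode
 * for writers; and finally let the quota code note the open.
 */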
static int ext4_file_open(struct inode * inode, struct file * filp)
{
        struct super_block *sb = inode->i_sb;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct vfsmount *mnt = filp->f_path.mnt;
        struct path path;
        char buf[64], *cp;
        int ret;

        if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
                     !(sb->s_flags & MS_RDONLY))) {
                sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
                /*
                 * Sample where the filesystem has been mounted and
                 * store it in the superblock for sysadmin convenience
                 * when trying to sort through large numbers of block
                 * devices or filesystem images.
                 */
                memset(buf, 0, sizeof(buf));
                path.mnt = mnt;
                path.dentry = mnt->mnt_root;
                cp = d_path(&path, buf, sizeof(buf));
                if (!IS_ERR(cp)) {
                        handle_t *handle;
                        int err;

                        handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
                        if (IS_ERR(handle))
                                return PTR_ERR(handle);
                        BUFFER_TRACE(sbi->s_sbh, "get_write_access");
                        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
                        if (err) {
                                ext4_journal_stop(handle);
                                return err;
                        }
                        strlcpy(sbi->s_es->s_last_mounted, cp,
                                sizeof(sbi->s_es->s_last_mounted));
                        ext4_handle_dirty_super(handle, sb);
                        ext4_journal_stop(handle);
                }
        }
        if (ext4_encrypted_inode(inode)) {
                ret = ext4_get_encryption_info(inode);
                if (ret)
                        return -EACCES;
                if (ext4_encryption_info(inode) == NULL)
                        return -ENOKEY;
        }
        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
         */
        if (filp->f_mode & FMODE_WRITE) {
                ret = ext4_inode_attach_jinode(inode);
                if (ret < 0)
                        return ret;
        }
        return dquot_file_open(inode, filp);
}

/*
 * We use ext4_map_blocks() rather than ext4_ext_walk_space() to get the
 * block mapping for an extent-based file, so that block-mapped and
 * extent-mapped files can be handled by the same SEEK_DATA/SEEK_HOLE
 * code.  Once the extent status tree tracks all extent state for a file,
 * we will be able to use it directly to retrieve the offset for
 * SEEK_DATA/SEEK_HOLE.
 */

/*
 * When retrieving the offset for SEEK_DATA/SEEK_HOLE, we may also need to
 * look in the page cache for data between [startoff, endoff]: if the
 * range covers an unwritten extent, that extent counts as data or as a
 * hole depending on whether the page cache holds data for it.
 */
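/*
 * Scan the page cache over the block range described by @map.  Returns 1
 * if the range should be treated as data (for SEEK_DATA) or as a hole
 * (for SEEK_HOLE), updating *offset where appropriate; returns 0
 * otherwise.
 */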
static int ext4_find_unwritten_pgoff(struct inode *inode,
                                     int whence,
                                     struct ext4_map_blocks *map,
                                     loff_t *offset)
{
        struct pagevec pvec;
        unsigned int blkbits;
        pgoff_t index;
        pgoff_t end;
        loff_t endoff;
        loff_t startoff;
        loff_t lastoff;
        int found = 0;

        blkbits = inode->i_sb->s_blocksize_bits;
        startoff = *offset;
        lastoff = startoff;
        endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;

        index = startoff >> PAGE_CACHE_SHIFT;
        end = endoff >> PAGE_CACHE_SHIFT;

        pagevec_init(&pvec, 0);
        do {
                int i, num;
                unsigned long nr_pages;

                num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
                nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                                          (pgoff_t)num);
                if (nr_pages == 0) {
                        if (whence == SEEK_DATA)
                                break;

                        BUG_ON(whence != SEEK_HOLE);
                        /*
                         * If this is the first pass through the loop, or
                         * we have not yet gone past the end offset, there
                         * is a hole at this offset.
                         */
                        if (lastoff == startoff || lastoff < endoff)
                                found = 1;
                        break;
                }

                /*
                 * On the first pass through the loop, if the offset is
                 * smaller than the offset of the first page found, there
                 * is a hole at this offset.
                 */
                if (lastoff == startoff && whence == SEEK_HOLE &&
                    lastoff < page_offset(pvec.pages[0])) {
                        found = 1;
                        break;
                }

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

                        /*
                         * If the current offset is not beyond the end of the
                         * given range, it will be a hole.
                         */
                        if (lastoff < endoff && whence == SEEK_HOLE &&
                            page->index > end) {
                                found = 1;
                                *offset = lastoff;
                                goto out;
                        }

                        lock_page(page);

                        if (unlikely(page->mapping != inode->i_mapping)) {
                                unlock_page(page);
                                continue;
                        }

                        if (!page_has_buffers(page)) {
                                unlock_page(page);
                                continue;
                        }

                        if (page_has_buffers(page)) {
                                lastoff = page_offset(page);
                                bh = head = page_buffers(page);
                                do {
                                        if (buffer_uptodate(bh) ||
                                            buffer_unwritten(bh)) {
                                                if (whence == SEEK_DATA)
                                                        found = 1;
                                        } else {
                                                if (whence == SEEK_HOLE)
                                                        found = 1;
                                        }
                                        if (found) {
                                                *offset = max_t(loff_t,
                                                        startoff, lastoff);
                                                unlock_page(page);
                                                goto out;
                                        }
                                        lastoff += bh->b_size;
                                        bh = bh->b_this_page;
                                } while (bh != head);
                        }

                        lastoff = page_offset(page) + PAGE_SIZE;
                        unlock_page(page);
                }

                /*
                 * Fewer pages were found than we asked for, so the rest of
                 * the range must be a hole.
                 */
                if (nr_pages < num && whence == SEEK_HOLE) {
                        found = 1;
                        *offset = lastoff;
                        break;
                }

                index = pvec.pages[i - 1]->index + 1;
                pagevec_release(&pvec);
        } while (index <= end);

out:
        pagevec_release(&pvec);
        return found;
}

/*
 * ext4_seek_data() retrieves the offset for SEEK_DATA.
 */
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_map_blocks map;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t dataoff, isize;
        int blkbits;
        int ret = 0;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);
        if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        dataoff = offset;

        do {
                map.m_lblk = last;
                map.m_len = end - last + 1;
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        if (last != start)
                                dataoff = (loff_t)last << blkbits;
                        break;
                }

                /*
                 * If there is a delayed extent at this offset,
                 * treat it as data.
                 */
                ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        if (last != start)
                                dataoff = (loff_t)last << blkbits;
                        break;
                }

                /*
                 * If there is an unwritten extent at this offset, it
                 * counts as data or as a hole depending on whether the
                 * page cache has data for it.
                 */
                if (map.m_flags & EXT4_MAP_UNWRITTEN) {
                        int unwritten;
                        unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
                                                              &map, &dataoff);
                        if (unwritten)
                                break;
                }

                last++;
                dataoff = (loff_t)last << blkbits;
        } while (last <= end);

        mutex_unlock(&inode->i_mutex);

        if (dataoff > isize)
                return -ENXIO;

        return vfs_setpos(file, dataoff, maxsize);
}

/*
 * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
 */
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_map_blocks map;
        struct extent_status es;
        ext4_lblk_t start, last, end;
        loff_t holeoff, isize;
        int blkbits;
        int ret = 0;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);
        if (offset >= isize) {
                mutex_unlock(&inode->i_mutex);
                return -ENXIO;
        }

        blkbits = inode->i_sb->s_blocksize_bits;
        start = offset >> blkbits;
        last = start;
        end = isize >> blkbits;
        holeoff = offset;

        do {
                map.m_lblk = last;
                map.m_len = end - last + 1;
                ret = ext4_map_blocks(NULL, inode, &map, 0);
                if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
                        last += ret;
                        holeoff = (loff_t)last << blkbits;
                        continue;
                }

                /*
                 * If there is a delayed extent at this offset,
                 * skip over it.
                 */
                ext4_es_find_delayed_extent_range(inode, last, last, &es);
                if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
                        last = es.es_lblk + es.es_len;
                        holeoff = (loff_t)last << blkbits;
                        continue;
                }

                /*
                 * If there is an unwritten extent at this offset, it
                 * counts as data or as a hole depending on whether the
                 * page cache has data for it.
                 */
                if (map.m_flags & EXT4_MAP_UNWRITTEN) {
                        int unwritten;
                        unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
                                                              &map, &holeoff);
                        if (!unwritten) {
                                last += ret;
                                holeoff = (loff_t)last << blkbits;
                                continue;
                        }
                }

                /* found a hole */
                break;
        } while (last <= end);

        mutex_unlock(&inode->i_mutex);

        if (holeoff > isize)
                holeoff = isize;

        return vfs_setpos(file, holeoff, maxsize);
}

/*
 * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
 * by calling generic_file_llseek_size() with the appropriate maxbytes
 * value for each.
 */
loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *inode = file->f_mapping->host;
        loff_t maxbytes;

        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
        else
                maxbytes = inode->i_sb->s_maxbytes;

        switch (whence) {
        case SEEK_SET:
        case SEEK_CUR:
        case SEEK_END:
                return generic_file_llseek_size(file, offset, whence,
                                                maxbytes, i_size_read(inode));
        case SEEK_DATA:
                return ext4_seek_data(file, offset, maxbytes);
        case SEEK_HOLE:
                return ext4_seek_hole(file, offset, maxbytes);
        }

        return -EINVAL;
}

const struct file_operations ext4_file_operations = {
        .llseek         = ext4_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = ext4_file_write_iter,
        .unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
#endif
        .mmap           = ext4_file_mmap,
        .open           = ext4_file_open,
        .release        = ext4_release_file,
        .fsync          = ext4_sync_file,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ext4_fallocate,
};

const struct inode_operations ext4_file_inode_operations = {
        .setattr        = ext4_setattr,
        .getattr        = ext4_getattr,
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = ext4_listxattr,
        .removexattr    = generic_removexattr,
        .get_acl        = ext4_get_acl,
        .set_acl        = ext4_set_acl,
        .fiemap         = ext4_fiemap,
};