/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>

#define RADIX_DAX_MASK  0xf
#define RADIX_DAX_SHIFT 4
#define RADIX_DAX_PTE  (0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY)
#define RADIX_DAX_PMD  (0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY)
#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_MASK)
#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
                RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE)))

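/*
 * For illustration: storing sector 0x1234 as a PTE-sized entry gives
 *
 *	entry = RADIX_DAX_ENTRY(0x1234, false)
 *	      = (void *)((0x1234UL << RADIX_DAX_SHIFT) | RADIX_DAX_PTE);
 *
 * RADIX_DAX_TYPE(entry) masks off the low four bits and compares equal to
 * RADIX_DAX_PTE, while RADIX_DAX_SECTOR(entry) shifts those bits back out
 * and recovers 0x1234.
 */
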
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
        struct request_queue *q = bdev->bd_queue;
        long rc = -EIO;

        dax->addr = (void __pmem *) ERR_PTR(-EIO);
        if (blk_queue_enter(q, true) != 0)
                return rc;

        rc = bdev_direct_access(bdev, dax);
        if (rc < 0) {
                dax->addr = (void __pmem *) ERR_PTR(rc);
                blk_queue_exit(q);
                return rc;
        }
        return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
                const struct blk_dax_ctl *dax)
{
        if (IS_ERR(dax->addr))
                return;
        blk_queue_exit(bdev->bd_queue);
}

struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
        struct page *page = alloc_pages(GFP_KERNEL, 0);
        struct blk_dax_ctl dax = {
                .size = PAGE_SIZE,
                .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
        };
        long rc;

        if (!page)
                return ERR_PTR(-ENOMEM);

        rc = dax_map_atomic(bdev, &dax);
        if (rc < 0)
                return ERR_PTR(rc);
        memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
        dax_unmap_atomic(bdev, &dax);
        return page;
}

static bool buffer_written(struct buffer_head *bh)
{
        return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
        return bh->b_state != 0;
}


static sector_t to_sector(const struct buffer_head *bh,
                const struct inode *inode)
{
        sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

        return sector;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
                      loff_t start, loff_t end, get_block_t get_block,
                      struct buffer_head *bh)
{
        loff_t pos = start, max = start, bh_max = start;
        bool hole = false, need_wmb = false;
        struct block_device *bdev = NULL;
        int rw = iov_iter_rw(iter), rc;
        long map_len = 0;
        struct blk_dax_ctl dax = {
                .addr = (void __pmem *) ERR_PTR(-EIO),
        };
        unsigned blkbits = inode->i_blkbits;
        sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
                                                                >> blkbits;

        if (rw == READ)
                end = min(end, i_size_read(inode));

        while (pos < end) {
                size_t len;
                if (pos == max) {
                        long page = pos >> PAGE_SHIFT;
                        sector_t block = page << (PAGE_SHIFT - blkbits);
                        unsigned first = pos - (block << blkbits);
                        long size;

                        if (pos == bh_max) {
                                bh->b_size = PAGE_ALIGN(end - pos);
                                bh->b_state = 0;
                                rc = get_block(inode, block, bh, rw == WRITE);
                                if (rc)
                                        break;
                                if (!buffer_size_valid(bh))
                                        bh->b_size = 1 << blkbits;
                                bh_max = pos - first + bh->b_size;
                                bdev = bh->b_bdev;
                                /*
                                 * We allow uninitialized buffers for writes
                                 * beyond EOF as those cannot race with faults
                                 */
                                WARN_ON_ONCE(
                                        (buffer_new(bh) && block < file_blks) ||
                                        (rw == WRITE && buffer_unwritten(bh)));
                        } else {
                                unsigned done = bh->b_size -
                                                (bh_max - (pos - first));
                                bh->b_blocknr += done >> blkbits;
                                bh->b_size -= done;
                        }

                        hole = rw == READ && !buffer_written(bh);
                        if (hole) {
                                size = bh->b_size - first;
                        } else {
                                dax_unmap_atomic(bdev, &dax);
                                dax.sector = to_sector(bh, inode);
                                dax.size = bh->b_size;
                                map_len = dax_map_atomic(bdev, &dax);
                                if (map_len < 0) {
                                        rc = map_len;
                                        break;
                                }
                                dax.addr += first;
                                size = map_len - first;
                        }
                        max = min(pos + size, end);
                }

                if (iov_iter_rw(iter) == WRITE) {
                        len = copy_from_iter_pmem(dax.addr, max - pos, iter);
                        need_wmb = true;
                } else if (!hole)
                        len = copy_to_iter((void __force *) dax.addr, max - pos,
                                        iter);
                else
                        len = iov_iter_zero(max - pos, iter);

                if (!len) {
                        rc = -EFAULT;
                        break;
                }

                pos += len;
                if (!IS_ERR(dax.addr))
                        dax.addr += len;
        }

        if (need_wmb)
                wmb_pmem();
        dax_unmap_atomic(bdev, &dax);

        return (pos == start) ? rc : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
                  struct iov_iter *iter, get_block_t get_block,
                  dio_iodone_t end_io, int flags)
{
        struct buffer_head bh;
        ssize_t retval = -EINVAL;
        loff_t pos = iocb->ki_pos;
        loff_t end = pos + iov_iter_count(iter);

        memset(&bh, 0, sizeof(bh));
        bh.b_bdev = inode->i_sb->s_bdev;

        if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
                inode_lock(inode);

        /* Protects against truncate */
        if (!(flags & DIO_SKIP_DIO_COUNT))
                inode_dio_begin(inode);

        retval = dax_io(inode, iter, pos, end, get_block, &bh);

        if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
                inode_unlock(inode);

        if (end_io) {
                int err;

                err = end_io(iocb, pos, retval, bh.b_private);
                if (err)
                        retval = err;
        }

        if (!(flags & DIO_SKIP_DIO_COUNT))
                inode_dio_end(inode);
        return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
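
/*
 * A minimal usage sketch, assuming a hypothetical "examplefs" with its own
 * get_block_t helper and a fallback helper for the non-DAX case: a
 * filesystem's ->direct_IO method can forward DAX inodes here much as it
 * would forward other inodes to the generic direct I/O code:
 *
 *	static ssize_t examplefs_direct_IO(struct kiocb *iocb,
 *					   struct iov_iter *iter)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		if (IS_DAX(inode))
 *			return dax_do_io(iocb, inode, iter,
 *					 examplefs_get_block, NULL,
 *					 DIO_LOCKING);
 *
 *		return examplefs_blockdev_direct_IO(iocb, iter);
 *	}
 */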

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
                                                        struct vm_fault *vmf)
{
        if (!page)
                page = find_or_create_page(mapping, vmf->pgoff,
                                                GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return VM_FAULT_OOM;

        vmf->page = page;
        return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct inode *inode,
                struct buffer_head *bh, unsigned long vaddr)
{
        struct blk_dax_ctl dax = {
                .sector = to_sector(bh, inode),
                .size = bh->b_size,
        };
        struct block_device *bdev = bh->b_bdev;
        void *vto;

        if (dax_map_atomic(bdev, &dax) < 0)
                return PTR_ERR(dax.addr);
        vto = kmap_atomic(to);
        copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
        kunmap_atomic(vto);
        dax_unmap_atomic(bdev, &dax);
        return 0;
}

#define NO_SECTOR -1
#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))

static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
                sector_t sector, bool pmd_entry, bool dirty)
{
        struct radix_tree_root *page_tree = &mapping->page_tree;
        pgoff_t pmd_index = DAX_PMD_INDEX(index);
        int type, error = 0;
        void *entry;

        WARN_ON_ONCE(pmd_entry && !dirty);
        if (dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

        spin_lock_irq(&mapping->tree_lock);

        entry = radix_tree_lookup(page_tree, pmd_index);
        if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) {
                index = pmd_index;
                goto dirty;
        }

        entry = radix_tree_lookup(page_tree, index);
        if (entry) {
                type = RADIX_DAX_TYPE(entry);
                if (WARN_ON_ONCE(type != RADIX_DAX_PTE &&
                                        type != RADIX_DAX_PMD)) {
                        error = -EIO;
                        goto unlock;
                }

                if (!pmd_entry || type == RADIX_DAX_PMD)
                        goto dirty;

                /*
                 * We only insert dirty PMD entries into the radix tree.  This
                 * means we don't need to worry about removing a dirty PTE
                 * entry and inserting a clean PMD entry, thus reducing the
                 * range we would flush with a follow-up fsync/msync call.
                 */
                radix_tree_delete(&mapping->page_tree, index);
                mapping->nrexceptional--;
        }

        if (sector == NO_SECTOR) {
                /*
                 * This can happen during correct operation if our pfn_mkwrite
                 * fault raced against a hole punch operation.  If this
                 * happens the pte that was hole punched will have been
                 * unmapped and the radix tree entry will have been removed by
                 * the time we are called, but the call will still happen.  We
                 * will return all the way up to wp_pfn_shared(), where the
                 * pte_same() check will fail, eventually causing page fault
                 * to be retried by the CPU.
                 */
                goto unlock;
        }

        error = radix_tree_insert(page_tree, index,
                        RADIX_DAX_ENTRY(sector, pmd_entry));
        if (error)
                goto unlock;

        mapping->nrexceptional++;
 dirty:
        if (dirty)
                radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
        spin_unlock_irq(&mapping->tree_lock);
        return error;
}

static int dax_writeback_one(struct block_device *bdev,
                struct address_space *mapping, pgoff_t index, void *entry)
{
        struct radix_tree_root *page_tree = &mapping->page_tree;
        int type = RADIX_DAX_TYPE(entry);
        struct radix_tree_node *node;
        struct blk_dax_ctl dax;
        void **slot;
        int ret = 0;

        spin_lock_irq(&mapping->tree_lock);
        /*
         * Regular page slots are stabilized by the page lock even
         * without the tree itself locked.  These unlocked entries
         * need verification under the tree lock.
         */
        if (!__radix_tree_lookup(page_tree, index, &node, &slot))
                goto unlock;
        if (*slot != entry)
                goto unlock;

        /* another fsync thread may have already written back this entry */
        if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
                goto unlock;

        if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
                ret = -EIO;
                goto unlock;
        }

        dax.sector = RADIX_DAX_SECTOR(entry);
        dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
        spin_unlock_irq(&mapping->tree_lock);

        /*
         * We cannot hold tree_lock while calling dax_map_atomic() because it
         * eventually calls cond_resched().
         */
        ret = dax_map_atomic(bdev, &dax);
        if (ret < 0)
                return ret;

        if (WARN_ON_ONCE(ret < dax.size)) {
                ret = -EIO;
                goto unmap;
        }

        wb_cache_pmem(dax.addr, dax.size);

        spin_lock_irq(&mapping->tree_lock);
        radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
        spin_unlock_irq(&mapping->tree_lock);
 unmap:
        dax_unmap_atomic(bdev, &dax);
        return ret;

 unlock:
        spin_unlock_irq(&mapping->tree_lock);
        return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
                struct block_device *bdev, struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        pgoff_t start_index, end_index, pmd_index;
        pgoff_t indices[PAGEVEC_SIZE];
        struct pagevec pvec;
        bool done = false;
        int i, ret = 0;
        void *entry;

        if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
                return -EIO;

        if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
                return 0;

        start_index = wbc->range_start >> PAGE_SHIFT;
        end_index = wbc->range_end >> PAGE_SHIFT;
        pmd_index = DAX_PMD_INDEX(start_index);

        rcu_read_lock();
        entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
        rcu_read_unlock();

        /* see if the start of our range is covered by a PMD entry */
        if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
                start_index = pmd_index;

        tag_pages_for_writeback(mapping, start_index, end_index);

        pagevec_init(&pvec, 0);
        while (!done) {
                pvec.nr = find_get_entries_tag(mapping, start_index,
                                PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
                                pvec.pages, indices);

                if (pvec.nr == 0)
                        break;

                for (i = 0; i < pvec.nr; i++) {
                        if (indices[i] > end_index) {
                                done = true;
                                break;
                        }

                        ret = dax_writeback_one(bdev, mapping, indices[i],
                                        pvec.pages[i]);
                        if (ret < 0)
                                return ret;
                }
        }
        wmb_pmem();
        return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
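
/*
 * A minimal usage sketch, assuming a hypothetical "examplefs": filesystems
 * typically call this from their ->writepages method so that fsync() and
 * msync() flush dirty DAX mappings to the persistent domain:
 *
 *	static int examplefs_writepages(struct address_space *mapping,
 *					struct writeback_control *wbc)
 *	{
 *		if (dax_mapping(mapping))
 *			return dax_writeback_mapping_range(mapping,
 *					mapping->host->i_sb->s_bdev, wbc);
 *
 *		return generic_writepages(mapping, wbc);
 *	}
 */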

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
                        struct vm_area_struct *vma, struct vm_fault *vmf)
{
        unsigned long vaddr = (unsigned long)vmf->virtual_address;
        struct address_space *mapping = inode->i_mapping;
        struct block_device *bdev = bh->b_bdev;
        struct blk_dax_ctl dax = {
                .sector = to_sector(bh, inode),
                .size = bh->b_size,
        };
        int error;

        i_mmap_lock_read(mapping);

        if (dax_map_atomic(bdev, &dax) < 0) {
                error = PTR_ERR(dax.addr);
                goto out;
        }
        dax_unmap_atomic(bdev, &dax);

        error = dax_radix_entry(mapping, vmf->pgoff, dax.sector, false,
                        vmf->flags & FAULT_FLAG_WRITE);
        if (error)
                goto out;

        error = vm_insert_mixed(vma, vaddr, dax.pfn);

 out:
        i_mmap_unlock_read(mapping);

        return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        get_block_t get_block)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct page *page;
        struct buffer_head bh;
        unsigned long vaddr = (unsigned long)vmf->virtual_address;
        unsigned blkbits = inode->i_blkbits;
        sector_t block;
        pgoff_t size;
        int error;
        int major = 0;

        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (vmf->pgoff >= size)
                return VM_FAULT_SIGBUS;

        memset(&bh, 0, sizeof(bh));
        block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
        bh.b_bdev = inode->i_sb->s_bdev;
        bh.b_size = PAGE_SIZE;

 repeat:
        page = find_get_page(mapping, vmf->pgoff);
        if (page) {
                if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
                        put_page(page);
                        return VM_FAULT_RETRY;
                }
                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        put_page(page);
                        goto repeat;
                }
        }

        error = get_block(inode, block, &bh, 0);
        if (!error && (bh.b_size < PAGE_SIZE))
                error = -EIO;           /* fs corruption? */
        if (error)
                goto unlock_page;

        if (!buffer_mapped(&bh) && !vmf->cow_page) {
                if (vmf->flags & FAULT_FLAG_WRITE) {
                        error = get_block(inode, block, &bh, 1);
                        count_vm_event(PGMAJFAULT);
                        mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                        major = VM_FAULT_MAJOR;
                        if (!error && (bh.b_size < PAGE_SIZE))
                                error = -EIO;
                        if (error)
                                goto unlock_page;
                } else {
                        return dax_load_hole(mapping, page, vmf);
                }
        }

        if (vmf->cow_page) {
                struct page *new_page = vmf->cow_page;
                if (buffer_written(&bh))
                        error = copy_user_bh(new_page, inode, &bh, vaddr);
                else
                        clear_user_highpage(new_page, vaddr);
                if (error)
                        goto unlock_page;
                vmf->page = page;
                if (!page)
                        i_mmap_lock_read(mapping);
                return VM_FAULT_LOCKED;
        }

        /* Check we didn't race with a read fault installing a new page */
        if (!page && major)
                page = find_lock_page(mapping, vmf->pgoff);

        if (page) {
                unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
                                                        PAGE_SIZE, 0);
                delete_from_page_cache(page);
                unlock_page(page);
                put_page(page);
                page = NULL;
        }

        /* Filesystem should not return unwritten buffers to us! */
        WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
        error = dax_insert_mapping(inode, &bh, vma, vmf);

 out:
        if (error == -ENOMEM)
                return VM_FAULT_OOM | major;
        /* -EBUSY is fine, somebody else faulted on the same PTE */
        if ((error < 0) && (error != -EBUSY))
                return VM_FAULT_SIGBUS | major;
        return VM_FAULT_NOPAGE | major;

 unlock_page:
        if (page) {
                unlock_page(page);
                put_page(page);
        }
        goto out;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
              get_block_t get_block)
{
        int result;
        struct super_block *sb = file_inode(vma->vm_file)->i_sb;

        if (vmf->flags & FAULT_FLAG_WRITE) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
        }
        result = __dax_fault(vma, vmf, get_block);
        if (vmf->flags & FAULT_FLAG_WRITE)
                sb_end_pagefault(sb);

        return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
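
/*
 * A minimal usage sketch, assuming a hypothetical "examplefs" that
 * serialises DAX faults against truncate with its own rwsem: such a
 * filesystem either uses dax_fault() directly as its ->fault method or
 * wraps __dax_fault() like this (EXAMPLEFS_I(), dax_sem and
 * examplefs_get_block are hypothetical):
 *
 *	static int examplefs_dax_fault(struct vm_area_struct *vma,
 *				       struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vma->vm_file);
 *		int ret;
 *
 *		if (vmf->flags & FAULT_FLAG_WRITE) {
 *			sb_start_pagefault(inode->i_sb);
 *			file_update_time(vma->vm_file);
 *		}
 *		down_read(&EXAMPLEFS_I(inode)->dax_sem);
 *		ret = __dax_fault(vma, vmf, examplefs_get_block);
 *		up_read(&EXAMPLEFS_I(inode)->dax_sem);
 *		if (vmf->flags & FAULT_FLAG_WRITE)
 *			sb_end_pagefault(inode->i_sb);
 *
 *		return ret;
 *	}
 */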

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR   ((PMD_SIZE >> PAGE_SHIFT) - 1)

static void __dax_dbg(struct buffer_head *bh, unsigned long address,
                const char *reason, const char *fn)
{
        if (bh) {
                char bname[BDEVNAME_SIZE];
                bdevname(bh->b_bdev, bname);
                pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
                        "length %zd fallback: %s\n", fn, current->comm,
                        address, bname, bh->b_state, (u64)bh->b_blocknr,
                        bh->b_size, reason);
        } else {
                pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
                        current->comm, address, reason);
        }
}

#define dax_pmd_dbg(bh, address, reason)        __dax_dbg(bh, address, reason, "dax_pmd")

int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        struct buffer_head bh;
        unsigned blkbits = inode->i_blkbits;
        unsigned long pmd_addr = address & PMD_MASK;
        bool write = flags & FAULT_FLAG_WRITE;
        struct block_device *bdev;
        pgoff_t size, pgoff;
        sector_t block;
        int error, result = 0;
        bool alloc = false;

        /* dax pmd mappings require pfn_t_devmap() */
        if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
                return VM_FAULT_FALLBACK;

        /* Fall back to PTEs if we're going to COW */
        if (write && !(vma->vm_flags & VM_SHARED)) {
                split_huge_pmd(vma, pmd, address);
                dax_pmd_dbg(NULL, address, "cow write");
                return VM_FAULT_FALLBACK;
        }
        /* If the PMD would extend outside the VMA */
        if (pmd_addr < vma->vm_start) {
                dax_pmd_dbg(NULL, address, "vma start unaligned");
                return VM_FAULT_FALLBACK;
        }
        if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
                dax_pmd_dbg(NULL, address, "vma end unaligned");
                return VM_FAULT_FALLBACK;
        }

        pgoff = linear_page_index(vma, pmd_addr);
        size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (pgoff >= size)
                return VM_FAULT_SIGBUS;
        /* If the PMD would cover blocks out of the file */
        if ((pgoff | PG_PMD_COLOUR) >= size) {
                dax_pmd_dbg(NULL, address,
                                "offset + huge page size > file size");
                return VM_FAULT_FALLBACK;
        }

        memset(&bh, 0, sizeof(bh));
        bh.b_bdev = inode->i_sb->s_bdev;
        block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

        bh.b_size = PMD_SIZE;

        if (get_block(inode, block, &bh, 0) != 0)
                return VM_FAULT_SIGBUS;

        if (!buffer_mapped(&bh) && write) {
                if (get_block(inode, block, &bh, 1) != 0)
                        return VM_FAULT_SIGBUS;
                alloc = true;
                WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
        }

        bdev = bh.b_bdev;

        /*
         * If the filesystem isn't willing to tell us the length of a hole,
         * just fall back to PTEs.  Calling get_block 512 times in a loop
         * would be silly.
         */
        if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
                dax_pmd_dbg(&bh, address, "allocated block too small");
                return VM_FAULT_FALLBACK;
        }

        /*
         * If we allocated new storage, make sure no process has any
         * zero pages covering this hole
         */
        if (alloc) {
                loff_t lstart = pgoff << PAGE_SHIFT;
                loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */

                truncate_pagecache_range(inode, lstart, lend);
        }

        i_mmap_lock_read(mapping);

        if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
                spinlock_t *ptl;
                pmd_t entry;
                struct page *zero_page = get_huge_zero_page();

                if (unlikely(!zero_page)) {
                        dax_pmd_dbg(&bh, address, "no zero page");
                        goto fallback;
                }

                ptl = pmd_lock(vma->vm_mm, pmd);
                if (!pmd_none(*pmd)) {
                        spin_unlock(ptl);
                        dax_pmd_dbg(&bh, address, "pmd already present");
                        goto fallback;
                }

                dev_dbg(part_to_dev(bdev->bd_part),
                                "%s: %s addr: %lx pfn: <zero> sect: %llx\n",
                                __func__, current->comm, address,
                                (unsigned long long) to_sector(&bh, inode));

                entry = mk_pmd(zero_page, vma->vm_page_prot);
                entry = pmd_mkhuge(entry);
                set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
                result = VM_FAULT_NOPAGE;
                spin_unlock(ptl);
        } else {
                struct blk_dax_ctl dax = {
                        .sector = to_sector(&bh, inode),
                        .size = PMD_SIZE,
                };
                long length = dax_map_atomic(bdev, &dax);

                if (length < 0) {
                        dax_pmd_dbg(&bh, address, "dax-error fallback");
                        goto fallback;
                }
                if (length < PMD_SIZE) {
                        dax_pmd_dbg(&bh, address, "dax-length too small");
                        dax_unmap_atomic(bdev, &dax);
                        goto fallback;
                }
                if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
                        dax_pmd_dbg(&bh, address, "pfn unaligned");
                        dax_unmap_atomic(bdev, &dax);
                        goto fallback;
                }

                if (!pfn_t_devmap(dax.pfn)) {
                        dax_unmap_atomic(bdev, &dax);
                        dax_pmd_dbg(&bh, address, "pfn not in memmap");
                        goto fallback;
                }
                dax_unmap_atomic(bdev, &dax);

                /*
                 * For PTE faults we insert a radix tree entry for reads, and
                 * leave it clean.  Then on the first write we dirty the radix
                 * tree entry via the dax_pfn_mkwrite() path.  This sequence
                 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
                 * call into get_block() to translate the pgoff to a sector in
                 * order to be able to create a new radix tree entry.
                 *
                 * The PMD path doesn't have an equivalent to
                 * dax_pfn_mkwrite(), though, so for a read followed by a
                 * write we traverse all the way through __dax_pmd_fault()
                 * twice.  This means we can just skip inserting a radix tree
                 * entry completely on the initial read and just wait until
                 * the write to insert a dirty entry.
                 */
                if (write) {
                        error = dax_radix_entry(mapping, pgoff, dax.sector,
                                        true, true);
                        if (error) {
                                dax_pmd_dbg(&bh, address,
                                                "PMD radix insertion failed");
                                goto fallback;
                        }
                }

                dev_dbg(part_to_dev(bdev->bd_part),
                                "%s: %s addr: %lx pfn: %lx sect: %llx\n",
                                __func__, current->comm, address,
                                pfn_t_to_pfn(dax.pfn),
                                (unsigned long long) dax.sector);
                result |= vmf_insert_pfn_pmd(vma, address, pmd,
                                dax.pfn, write);
        }

 out:
        i_mmap_unlock_read(mapping);

        return result;

 fallback:
        count_vm_event(THP_FAULT_FALLBACK);
        result = VM_FAULT_FALLBACK;
        goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The virtual address that faulted
 * @pmd: Pointer to the PMD entry in the page tables that needs to be filled
 * @flags: The fault flags (FAULT_FLAG_*)
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                        pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
        int result;
        struct super_block *sb = file_inode(vma->vm_file)->i_sb;

        if (flags & FAULT_FLAG_WRITE) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
        }
        result = __dax_pmd_fault(vma, address, pmd, flags, get_block);
        if (flags & FAULT_FLAG_WRITE)
                sb_end_pagefault(sb);

        return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        int error;

        /*
         * We pass NO_SECTOR to dax_radix_entry() because we expect that a
         * RADIX_DAX_PTE entry already exists in the radix tree from a
         * previous call to __dax_fault().  We just want to look up that PTE
         * entry using vmf->pgoff and make sure the dirty tag is set.  This
         * saves us from having to make a call to get_block() here to look
         * up the sector.
         */
        error = dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false,
                        true);

        if (error == -ENOMEM)
                return VM_FAULT_OOM;
        if (error)
                return VM_FAULT_SIGBUS;
        return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
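
/*
 * A minimal sketch of wiring the fault helpers above into an mmap
 * implementation, again with hypothetical "examplefs" wrappers around
 * dax_fault(), dax_pmd_fault() and dax_pfn_mkwrite():
 *
 *	static const struct vm_operations_struct examplefs_dax_vm_ops = {
 *		.fault		= examplefs_dax_fault,
 *		.pmd_fault	= examplefs_dax_pmd_fault,
 *		.page_mkwrite	= examplefs_dax_fault,
 *		.pfn_mkwrite	= examplefs_dax_pfn_mkwrite,
 *	};
 *
 *	static int examplefs_file_mmap(struct file *file,
 *				       struct vm_area_struct *vma)
 *	{
 *		if (!IS_DAX(file_inode(file)))
 *			return generic_file_mmap(file, vma);
 *
 *		file_accessed(file);
 *		vma->vm_ops = &examplefs_dax_vm_ops;
 *		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
 *		return 0;
 *	}
 */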

static bool dax_range_is_aligned(struct block_device *bdev,
                                 unsigned int offset, unsigned int length)
{
        unsigned short sector_size = bdev_logical_block_size(bdev);

        if (!IS_ALIGNED(offset, sector_size))
                return false;
        if (!IS_ALIGNED(length, sector_size))
                return false;

        return true;
}

int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
                unsigned int offset, unsigned int length)
{
        struct blk_dax_ctl dax = {
                .sector         = sector,
                .size           = PAGE_SIZE,
        };

        if (dax_range_is_aligned(bdev, offset, length)) {
                sector_t start_sector = dax.sector + (offset >> 9);

                return blkdev_issue_zeroout(bdev, start_sector,
                                length >> 9, GFP_NOFS, true);
        } else {
                if (dax_map_atomic(bdev, &dax) < 0)
                        return PTR_ERR(dax.addr);
                clear_pmem(dax.addr + offset, length);
                wmb_pmem();
                dax_unmap_atomic(bdev, &dax);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
                                                        get_block_t get_block)
{
        struct buffer_head bh;
        pgoff_t index = from >> PAGE_SHIFT;
        unsigned offset = from & (PAGE_SIZE-1);
        int err;

        /* Block boundary? Nothing to do */
        if (!length)
                return 0;
        BUG_ON((offset + length) > PAGE_SIZE);

        memset(&bh, 0, sizeof(bh));
        bh.b_bdev = inode->i_sb->s_bdev;
        bh.b_size = PAGE_SIZE;
        err = get_block(inode, index, &bh, 0);
        if (err < 0 || !buffer_written(&bh))
                return err;

        return __dax_zero_page_range(bh.b_bdev, to_sector(&bh, inode),
                        offset, length);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
        unsigned length = PAGE_ALIGN(from) - from;
        return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
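
/*
 * A minimal usage sketch, assuming a hypothetical "examplefs": on truncate,
 * the tail of the new last page is zeroed through the DAX path before
 * i_size is updated, mirroring what block_truncate_page() does for
 * page-cache based filesystems:
 *
 *	static int examplefs_setsize(struct inode *inode, loff_t newsize)
 *	{
 *		int error;
 *
 *		if (IS_DAX(inode))
 *			error = dax_truncate_page(inode, newsize,
 *						  examplefs_get_block);
 *		else
 *			error = block_truncate_page(inode->i_mapping, newsize,
 *						    examplefs_get_block);
 *		if (error)
 *			return error;
 *
 *		truncate_setsize(inode, newsize);
 *		return 0;
 *	}
 */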