1 /*
2  * fs/dax.c - Direct Access filesystem code
3  * Copyright (c) 2013-2014 Intel Corporation
4  * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
5  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms and conditions of the GNU General Public License,
9  * version 2, as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  */
16
17 #include <linux/atomic.h>
18 #include <linux/blkdev.h>
19 #include <linux/buffer_head.h>
20 #include <linux/dax.h>
21 #include <linux/fs.h>
22 #include <linux/genhd.h>
23 #include <linux/highmem.h>
24 #include <linux/memcontrol.h>
25 #include <linux/mm.h>
26 #include <linux/mutex.h>
27 #include <linux/pagevec.h>
28 #include <linux/pmem.h>
29 #include <linux/sched.h>
30 #include <linux/uio.h>
31 #include <linux/vmstat.h>
32 #include <linux/pfn_t.h>
33 #include <linux/sizes.h>
34 #include <linux/mmu_notifier.h>
35 #include <linux/iomap.h>
36 #include "internal.h"
37
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/fs_dax.h>
40
41 /* We choose 4096 entries - same as per-zone page wait tables */
42 #define DAX_WAIT_TABLE_BITS 12
43 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
44
45 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
46
47 static int __init init_dax_wait_table(void)
48 {
49         int i;
50
51         for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
52                 init_waitqueue_head(wait_table + i);
53         return 0;
54 }
55 fs_initcall(init_dax_wait_table);
56
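/*
 * Map the block device range described by @dax for direct access.  On success
 * dax->addr and dax->pfn are filled in, a reference on the request queue is
 * held and the number of bytes available at dax->addr is returned; the caller
 * must pair this with dax_unmap_atomic() to drop the queue reference.  On
 * failure dax->addr is set to an ERR_PTR() and a negative errno is returned.
 */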
57 static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
58 {
59         struct request_queue *q = bdev->bd_queue;
60         long rc = -EIO;
61
62         dax->addr = ERR_PTR(-EIO);
63         if (blk_queue_enter(q, true) != 0)
64                 return rc;
65
66         rc = bdev_direct_access(bdev, dax);
67         if (rc < 0) {
68                 dax->addr = ERR_PTR(rc);
69                 blk_queue_exit(q);
70                 return rc;
71         }
72         return rc;
73 }
74
75 static void dax_unmap_atomic(struct block_device *bdev,
76                 const struct blk_dax_ctl *dax)
77 {
78         if (IS_ERR(dax->addr))
79                 return;
80         blk_queue_exit(bdev->bd_queue);
81 }
82
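/*
 * DAX radix tree entries are exceptional entries with extra flag bits encoded
 * in the low bits of the value.  The helpers below test those bits: whether
 * the entry covers a PMD or a PTE sized range, whether it refers to a huge
 * zero page and whether it is an empty placeholder entry.
 */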
83 static int dax_is_pmd_entry(void *entry)
84 {
85         return (unsigned long)entry & RADIX_DAX_PMD;
86 }
87
88 static int dax_is_pte_entry(void *entry)
89 {
90         return !((unsigned long)entry & RADIX_DAX_PMD);
91 }
92
93 static int dax_is_zero_entry(void *entry)
94 {
95         return (unsigned long)entry & RADIX_DAX_HZP;
96 }
97
98 static int dax_is_empty_entry(void *entry)
99 {
100         return (unsigned long)entry & RADIX_DAX_EMPTY;
101 }
102
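/*
 * Read the page-sized, page-aligned chunk of the block device that contains
 * sector @n via the DAX mapping and copy it into a newly allocated page.
 * Returns the page on success or an ERR_PTR() on failure.
 */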
103 struct page *read_dax_sector(struct block_device *bdev, sector_t n)
104 {
105         struct page *page = alloc_pages(GFP_KERNEL, 0);
106         struct blk_dax_ctl dax = {
107                 .size = PAGE_SIZE,
108                 .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
109         };
110         long rc;
111
112         if (!page)
113                 return ERR_PTR(-ENOMEM);
114
115         rc = dax_map_atomic(bdev, &dax);
116         if (rc < 0)
117                 return ERR_PTR(rc);
118         memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
119         dax_unmap_atomic(bdev, &dax);
120         return page;
121 }
122
123 /*
124  * DAX radix tree locking
125  */
126 struct exceptional_entry_key {
127         struct address_space *mapping;
128         pgoff_t entry_start;
129 };
130
131 struct wait_exceptional_entry_queue {
132         wait_queue_t wait;
133         struct exceptional_entry_key key;
134 };
135
136 static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
137                 pgoff_t index, void *entry, struct exceptional_entry_key *key)
138 {
139         unsigned long hash;
140
141         /*
142          * If 'entry' is a PMD, align the 'index' that we use for the wait
143          * queue to the start of that PMD.  This ensures that all offsets in
144          * the range covered by the PMD map to the same bit lock.
145          */
146         if (dax_is_pmd_entry(entry))
147                 index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);
148
149         key->mapping = mapping;
150         key->entry_start = index;
151
152         hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
153         return wait_table + hash;
154 }
155
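/*
 * Wake function for the DAX entry waitqueues.  The waitqueues are hashed, so
 * one queue may be shared by unrelated entries; only wake a waiter if the key
 * matches the mapping and entry_start it is actually waiting for.
 */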
156 static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
157                                        int sync, void *keyp)
158 {
159         struct exceptional_entry_key *key = keyp;
160         struct wait_exceptional_entry_queue *ewait =
161                 container_of(wait, struct wait_exceptional_entry_queue, wait);
162
163         if (key->mapping != ewait->key.mapping ||
164             key->entry_start != ewait->key.entry_start)
165                 return 0;
166         return autoremove_wake_function(wait, mode, sync, NULL);
167 }
168
169 /*
170  * Check whether the given slot is locked. The function must be called with
171  * mapping->tree_lock held
172  */
173 static inline int slot_locked(struct address_space *mapping, void **slot)
174 {
175         unsigned long entry = (unsigned long)
176                 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
177         return entry & RADIX_DAX_ENTRY_LOCK;
178 }
179
180 /*
181  * Mark the given slot as locked. The function must be called with
182  * mapping->tree_lock held
183  */
184 static inline void *lock_slot(struct address_space *mapping, void **slot)
185 {
186         unsigned long entry = (unsigned long)
187                 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
188
189         entry |= RADIX_DAX_ENTRY_LOCK;
190         radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
191         return (void *)entry;
192 }
193
194 /*
195  * Mark the given slot as unlocked. The function must be called with
196  * mapping->tree_lock held
197  */
198 static inline void *unlock_slot(struct address_space *mapping, void **slot)
199 {
200         unsigned long entry = (unsigned long)
201                 radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
202
203         entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
204         radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
205         return (void *)entry;
206 }
207
208 /*
209  * Look up an entry in the radix tree, wait for it to become unlocked if it
210  * is an exceptional entry, and return it. The caller must call
211  * put_unlocked_mapping_entry() if it decides not to lock the entry, or
212  * put_locked_mapping_entry() once it has locked the entry and wants to
213  * unlock it.
214  *
215  * The function must be called with mapping->tree_lock held.
216  */
217 static void *get_unlocked_mapping_entry(struct address_space *mapping,
218                                         pgoff_t index, void ***slotp)
219 {
220         void *entry, **slot;
221         struct wait_exceptional_entry_queue ewait;
222         wait_queue_head_t *wq;
223
224         init_wait(&ewait.wait);
225         ewait.wait.func = wake_exceptional_entry_func;
226
227         for (;;) {
228                 entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
229                                           &slot);
230                 if (!entry || !radix_tree_exceptional_entry(entry) ||
231                     !slot_locked(mapping, slot)) {
232                         if (slotp)
233                                 *slotp = slot;
234                         return entry;
235                 }
236
237                 wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
238                 prepare_to_wait_exclusive(wq, &ewait.wait,
239                                           TASK_UNINTERRUPTIBLE);
240                 spin_unlock_irq(&mapping->tree_lock);
241                 schedule();
242                 finish_wait(wq, &ewait.wait);
243                 spin_lock_irq(&mapping->tree_lock);
244         }
245 }
246
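/*
 * Clear the lock bit of the radix tree entry at @index and wake up one task
 * waiting for that entry lock.
 */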
247 static void dax_unlock_mapping_entry(struct address_space *mapping,
248                                      pgoff_t index)
249 {
250         void *entry, **slot;
251
252         spin_lock_irq(&mapping->tree_lock);
253         entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
254         if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
255                          !slot_locked(mapping, slot))) {
256                 spin_unlock_irq(&mapping->tree_lock);
257                 return;
258         }
259         unlock_slot(mapping, slot);
260         spin_unlock_irq(&mapping->tree_lock);
261         dax_wake_mapping_entry_waiter(mapping, index, entry, false);
262 }
263
264 static void put_locked_mapping_entry(struct address_space *mapping,
265                                      pgoff_t index, void *entry)
266 {
267         if (!radix_tree_exceptional_entry(entry)) {
268                 unlock_page(entry);
269                 put_page(entry);
270         } else {
271                 dax_unlock_mapping_entry(mapping, index);
272         }
273 }
274
275 /*
276  * Called when we are done with the radix tree entry we looked up via
277  * get_unlocked_mapping_entry() and which we didn't lock in the end.
278  */
279 static void put_unlocked_mapping_entry(struct address_space *mapping,
280                                        pgoff_t index, void *entry)
281 {
282         if (!radix_tree_exceptional_entry(entry))
283                 return;
284
285         /* We have to wake up next waiter for the radix tree entry lock */
286         dax_wake_mapping_entry_waiter(mapping, index, entry, false);
287 }
288
289 /*
290  * Find the radix tree entry at the given index.  If it points to a page,
291  * return with the page locked.  If it points to an exceptional entry, return
292  * with the radix tree entry locked.  If the radix tree doesn't contain the
293  * given index, create an empty exceptional entry for it and return it locked.
294  *
295  * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
296  * either return that locked entry or will return an error.  This error will
297  * happen if there are any 4k entries (either zero pages or DAX entries)
298  * within the 2MiB range that we are requesting.
299  *
300  * We always favor 4k entries over 2MiB entries. There isn't a flow where we
301  * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
302  * insertion will fail if it finds any 4k entries already in the tree, and a
303  * 4k insertion will cause an existing 2MiB entry to be unmapped and
304  * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
305  * well as 2MiB empty entries.
306  *
307  * The exception to this downgrade path is for 2MiB DAX PMD entries that have
308  * real storage backing them.  We will leave these real 2MiB DAX entries in
309  * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
310  *
311  * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
312  * persistent memory the benefit is doubtful. We can add that later if we can
313  * show it helps.
314  */
315 static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
316                 unsigned long size_flag)
317 {
318         bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
319         void *entry, **slot;
320
321 restart:
322         spin_lock_irq(&mapping->tree_lock);
323         entry = get_unlocked_mapping_entry(mapping, index, &slot);
324
325         if (entry) {
326                 if (size_flag & RADIX_DAX_PMD) {
327                         if (!radix_tree_exceptional_entry(entry) ||
328                             dax_is_pte_entry(entry)) {
329                                 put_unlocked_mapping_entry(mapping, index,
330                                                 entry);
331                                 entry = ERR_PTR(-EEXIST);
332                                 goto out_unlock;
333                         }
334                 } else { /* trying to grab a PTE entry */
335                         if (radix_tree_exceptional_entry(entry) &&
336                             dax_is_pmd_entry(entry) &&
337                             (dax_is_zero_entry(entry) ||
338                              dax_is_empty_entry(entry))) {
339                                 pmd_downgrade = true;
340                         }
341                 }
342         }
343
344         /* No entry for given index? Make sure radix tree is big enough. */
345         if (!entry || pmd_downgrade) {
346                 int err;
347
348                 if (pmd_downgrade) {
349                         /*
350                          * Make sure 'entry' remains valid while we drop
351                          * mapping->tree_lock.
352                          */
353                         entry = lock_slot(mapping, slot);
354                 }
355
356                 spin_unlock_irq(&mapping->tree_lock);
357                 /*
358                  * Besides huge zero pages, the only other things that get
359                  * downgraded are empty entries, which don't need to be
360                  * unmapped.
361                  */
362                 if (pmd_downgrade && dax_is_zero_entry(entry))
363                         unmap_mapping_range(mapping,
364                                 (index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
365
366                 err = radix_tree_preload(
367                                 mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
368                 if (err) {
369                         if (pmd_downgrade)
370                                 put_locked_mapping_entry(mapping, index, entry);
371                         return ERR_PTR(err);
372                 }
373                 spin_lock_irq(&mapping->tree_lock);
374
375                 if (pmd_downgrade) {
376                         radix_tree_delete(&mapping->page_tree, index);
377                         mapping->nrexceptional--;
378                         dax_wake_mapping_entry_waiter(mapping, index, entry,
379                                         true);
380                 }
381
382                 entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);
383
384                 err = __radix_tree_insert(&mapping->page_tree, index,
385                                 dax_radix_order(entry), entry);
386                 radix_tree_preload_end();
387                 if (err) {
388                         spin_unlock_irq(&mapping->tree_lock);
389                         /*
390                          * Someone already created the entry?  This is a
391                          * normal failure when inserting PMDs in a range
392                          * that already contains PTEs.  In that case we want
393                          * to return -EEXIST immediately.
394                          */
395                         if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
396                                 goto restart;
397                         /*
398                          * Our insertion of a DAX PMD entry failed, most
399                          * likely because it collided with a PTE sized entry
400                          * at a different index in the PMD range.  We haven't
401                          * inserted anything into the radix tree and have no
402                          * waiters to wake.
403                          */
404                         return ERR_PTR(err);
405                 }
406                 /* Good, we have inserted empty locked entry into the tree. */
407                 mapping->nrexceptional++;
408                 spin_unlock_irq(&mapping->tree_lock);
409                 return entry;
410         }
411         /* Normal page in radix tree? */
412         if (!radix_tree_exceptional_entry(entry)) {
413                 struct page *page = entry;
414
415                 get_page(page);
416                 spin_unlock_irq(&mapping->tree_lock);
417                 lock_page(page);
418                 /* Page got truncated? Retry... */
419                 if (unlikely(page->mapping != mapping)) {
420                         unlock_page(page);
421                         put_page(page);
422                         goto restart;
423                 }
424                 return page;
425         }
426         entry = lock_slot(mapping, slot);
427  out_unlock:
428         spin_unlock_irq(&mapping->tree_lock);
429         return entry;
430 }
431
432 /*
433  * We do not necessarily hold the mapping->tree_lock when we call this
434  * function so it is possible that 'entry' is no longer a valid item in the
435  * radix tree.  This is okay because all we really need to do is to find the
436  * correct waitqueue where tasks might be waiting for that old 'entry' and
437  * wake them.
438  */
439 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
440                 pgoff_t index, void *entry, bool wake_all)
441 {
442         struct exceptional_entry_key key;
443         wait_queue_head_t *wq;
444
445         wq = dax_entry_waitqueue(mapping, index, entry, &key);
446
447         /*
448          * Checking for locked entry and prepare_to_wait_exclusive() happens
449          * under mapping->tree_lock, ditto for entry handling in our callers.
450          * So at this point all tasks that could have seen our entry locked
451          * must be in the waitqueue and the following check will see them.
452          */
453         if (waitqueue_active(wq))
454                 __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
455 }
456
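/*
 * Delete the exceptional DAX entry at @index, waiting for the entry lock if
 * necessary.  When @trunc is false the entry is only removed if it is neither
 * dirty nor marked for writeback, so that unflushed data is not lost.
 * Returns 1 if an entry was deleted, 0 otherwise.
 */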
457 static int __dax_invalidate_mapping_entry(struct address_space *mapping,
458                                           pgoff_t index, bool trunc)
459 {
460         int ret = 0;
461         void *entry;
462         struct radix_tree_root *page_tree = &mapping->page_tree;
463
464         spin_lock_irq(&mapping->tree_lock);
465         entry = get_unlocked_mapping_entry(mapping, index, NULL);
466         if (!entry || !radix_tree_exceptional_entry(entry))
467                 goto out;
468         if (!trunc &&
469             (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
470              radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
471                 goto out;
472         radix_tree_delete(page_tree, index);
473         mapping->nrexceptional--;
474         ret = 1;
475 out:
476         put_unlocked_mapping_entry(mapping, index, entry);
477         spin_unlock_irq(&mapping->tree_lock);
478         return ret;
479 }
480 /*
481  * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
482  * entry to get unlocked before deleting it.
483  */
484 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
485 {
486         int ret = __dax_invalidate_mapping_entry(mapping, index, true);
487
488         /*
489          * This gets called from the truncate / punch_hole path. As such, the
490          * caller must hold locks protecting against concurrent modifications of
491          * the radix tree (usually fs-private i_mmap_sem for writing). Since the
492          * caller has seen an exceptional entry for this index, we'd better find
493          * it at that index as well...
494          */
495         WARN_ON_ONCE(!ret);
496         return ret;
497 }
498
499 /*
500  * Invalidate an exceptional DAX entry if it is easily possible. This handles
501  * DAX entries for invalidate_inode_pages(), so we evict the entry only if we
502  * can do so without blocking.
503  */
504 int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
505 {
506         int ret = 0;
507         void *entry, **slot;
508         struct radix_tree_root *page_tree = &mapping->page_tree;
509
510         spin_lock_irq(&mapping->tree_lock);
511         entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
512         if (!entry || !radix_tree_exceptional_entry(entry) ||
513             slot_locked(mapping, slot))
514                 goto out;
515         if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
516             radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
517                 goto out;
518         radix_tree_delete(page_tree, index);
519         mapping->nrexceptional--;
520         ret = 1;
521 out:
522         spin_unlock_irq(&mapping->tree_lock);
523         if (ret)
524                 dax_wake_mapping_entry_waiter(mapping, index, entry, true);
525         return ret;
526 }
527
528 /*
529  * Invalidate exceptional DAX entry if it is clean.
530  */
531 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
532                                       pgoff_t index)
533 {
534         return __dax_invalidate_mapping_entry(mapping, index, false);
535 }
536
537 /*
538  * The user has performed a load from a hole in the file.  Allocating
539  * a new page in the file would cause excessive storage usage for
540  * workloads with sparse files.  We allocate a page cache page instead.
541  * We'll kick it out of the page cache if it's ever written to,
542  * otherwise it will simply fall out of the page cache under memory
543  * pressure without ever having been dirtied.
544  */
545 static int dax_load_hole(struct address_space *mapping, void **entry,
546                          struct vm_fault *vmf)
547 {
548         struct page *page;
549         int ret;
550
551         /* Hole page already exists? Return it...  */
552         if (!radix_tree_exceptional_entry(*entry)) {
553                 page = *entry;
554                 goto out;
555         }
556
557         /* This will replace locked radix tree entry with a hole page */
558         page = find_or_create_page(mapping, vmf->pgoff,
559                                    vmf->gfp_mask | __GFP_ZERO);
560         if (!page)
561                 return VM_FAULT_OOM;
562  out:
563         vmf->page = page;
564         ret = finish_fault(vmf);
565         vmf->page = NULL;
566         *entry = page;
567         if (!ret) {
568                 /* Grab reference for PTE that is now referencing the page */
569                 get_page(page);
570                 return VM_FAULT_NOPAGE;
571         }
572         return ret;
573 }
574
575 static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
576                 struct page *to, unsigned long vaddr)
577 {
578         struct blk_dax_ctl dax = {
579                 .sector = sector,
580                 .size = size,
581         };
582         void *vto;
583
584         if (dax_map_atomic(bdev, &dax) < 0)
585                 return PTR_ERR(dax.addr);
586         vto = kmap_atomic(to);
587         copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
588         kunmap_atomic(vto);
589         dax_unmap_atomic(bdev, &dax);
590         return 0;
591 }
592
593 /*
594  * By this point grab_mapping_entry() has ensured that we have a locked entry
595  * of the appropriate size so we don't have to worry about downgrading PMDs to
596  * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
597  * already in the tree, we will skip the insertion and just dirty the PMD as
598  * appropriate.
599  */
600 static void *dax_insert_mapping_entry(struct address_space *mapping,
601                                       struct vm_fault *vmf,
602                                       void *entry, sector_t sector,
603                                       unsigned long flags)
604 {
605         struct radix_tree_root *page_tree = &mapping->page_tree;
606         int error = 0;
607         bool hole_fill = false;
608         void *new_entry;
609         pgoff_t index = vmf->pgoff;
610
611         if (vmf->flags & FAULT_FLAG_WRITE)
612                 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
613
614         /* Replacing hole page with block mapping? */
615         if (!radix_tree_exceptional_entry(entry)) {
616                 hole_fill = true;
617                 /*
618                  * Unmap the page now before we remove it from page cache below.
619                  * The page is locked so it cannot be faulted in again.
620                  */
621                 unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
622                                     PAGE_SIZE, 0);
623                 error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
624                 if (error)
625                         return ERR_PTR(error);
626         } else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
627                 /* replacing huge zero page with PMD block mapping */
628                 unmap_mapping_range(mapping,
629                         (vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
630         }
631
632         spin_lock_irq(&mapping->tree_lock);
633         new_entry = dax_radix_locked_entry(sector, flags);
634
635         if (hole_fill) {
636                 __delete_from_page_cache(entry, NULL);
637                 /* Drop pagecache reference */
638                 put_page(entry);
639                 error = __radix_tree_insert(page_tree, index,
640                                 dax_radix_order(new_entry), new_entry);
641                 if (error) {
642                         new_entry = ERR_PTR(error);
643                         goto unlock;
644                 }
645                 mapping->nrexceptional++;
646         } else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
647                 /*
648                  * Only swap our new entry into the radix tree if the current
649                  * entry is a zero page or an empty entry.  If a normal PTE or
650                  * PMD entry is already in the tree, we leave it alone.  This
651                  * means that if we are trying to insert a PTE and the
652                  * existing entry is a PMD, we will just leave the PMD in the
653                  * tree and dirty it if necessary.
654                  */
655                 struct radix_tree_node *node;
656                 void **slot;
657                 void *ret;
658
659                 ret = __radix_tree_lookup(page_tree, index, &node, &slot);
660                 WARN_ON_ONCE(ret != entry);
661                 __radix_tree_replace(page_tree, node, slot,
662                                      new_entry, NULL, NULL);
663         }
664         if (vmf->flags & FAULT_FLAG_WRITE)
665                 radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
666  unlock:
667         spin_unlock_irq(&mapping->tree_lock);
668         if (hole_fill) {
669                 radix_tree_preload_end();
670                 /*
671                  * We don't need the hole page anymore; it has been replaced
672                  * with a locked radix tree entry.
673                  */
674                 if (mapping->a_ops->freepage)
675                         mapping->a_ops->freepage(entry);
676                 unlock_page(entry);
677                 put_page(entry);
678         }
679         return new_entry;
680 }
681
682 static inline unsigned long
683 pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
684 {
685         unsigned long address;
686
687         address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
688         VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
689         return address;
690 }
691
692 /* Walk all mappings of a given index of a file and write-protect them */
693 static void dax_mapping_entry_mkclean(struct address_space *mapping,
694                                       pgoff_t index, unsigned long pfn)
695 {
696         struct vm_area_struct *vma;
697         pte_t pte, *ptep = NULL;
698         pmd_t *pmdp = NULL;
699         spinlock_t *ptl;
700         bool changed;
701
702         i_mmap_lock_read(mapping);
703         vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
704                 unsigned long address;
705
706                 cond_resched();
707
708                 if (!(vma->vm_flags & VM_SHARED))
709                         continue;
710
711                 address = pgoff_address(index, vma);
712                 changed = false;
713                 if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
714                         continue;
715
716                 if (pmdp) {
717 #ifdef CONFIG_FS_DAX_PMD
718                         pmd_t pmd;
719
720                         if (pfn != pmd_pfn(*pmdp))
721                                 goto unlock_pmd;
722                         if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
723                                 goto unlock_pmd;
724
725                         flush_cache_page(vma, address, pfn);
726                         pmd = pmdp_huge_clear_flush(vma, address, pmdp);
727                         pmd = pmd_wrprotect(pmd);
728                         pmd = pmd_mkclean(pmd);
729                         set_pmd_at(vma->vm_mm, address, pmdp, pmd);
730                         changed = true;
731 unlock_pmd:
732                         spin_unlock(ptl);
733 #endif
734                 } else {
735                         if (pfn != pte_pfn(*ptep))
736                                 goto unlock_pte;
737                         if (!pte_dirty(*ptep) && !pte_write(*ptep))
738                                 goto unlock_pte;
739
740                         flush_cache_page(vma, address, pfn);
741                         pte = ptep_clear_flush(vma, address, ptep);
742                         pte = pte_wrprotect(pte);
743                         pte = pte_mkclean(pte);
744                         set_pte_at(vma->vm_mm, address, ptep, pte);
745                         changed = true;
746 unlock_pte:
747                         pte_unmap_unlock(ptep, ptl);
748                 }
749
750                 if (changed)
751                         mmu_notifier_invalidate_page(vma->vm_mm, address);
752         }
753         i_mmap_unlock_read(mapping);
754 }
755
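/*
 * Flush a single dirty radix tree entry to the persistent domain: lock the
 * entry, clear the TOWRITE tag, write-protect all existing mappings of the
 * range, flush the CPU caches covering the whole entry and finally clear the
 * DIRTY tag.
 */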
756 static int dax_writeback_one(struct block_device *bdev,
757                 struct address_space *mapping, pgoff_t index, void *entry)
758 {
759         struct radix_tree_root *page_tree = &mapping->page_tree;
760         struct blk_dax_ctl dax;
761         void *entry2, **slot;
762         int ret = 0;
763
764         /*
765          * A page got tagged dirty in DAX mapping? Something is seriously
766          * wrong.
767          */
768         if (WARN_ON(!radix_tree_exceptional_entry(entry)))
769                 return -EIO;
770
771         spin_lock_irq(&mapping->tree_lock);
772         entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
773         /* Entry got punched out / reallocated? */
774         if (!entry2 || !radix_tree_exceptional_entry(entry2))
775                 goto put_unlocked;
776         /*
777          * Entry got reallocated elsewhere? No need to write it back. We have to
778          * compare sectors as we must not bail out due to a difference in the
779          * lock bit or entry type.
780          */
781         if (dax_radix_sector(entry2) != dax_radix_sector(entry))
782                 goto put_unlocked;
783         if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
784                                 dax_is_zero_entry(entry))) {
785                 ret = -EIO;
786                 goto put_unlocked;
787         }
788
789         /* Another fsync thread may have already written back this entry */
790         if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
791                 goto put_unlocked;
792         /* Lock the entry to serialize with page faults */
793         entry = lock_slot(mapping, slot);
794         /*
795          * We can clear the tag now but we have to be careful so that concurrent
796          * dax_writeback_one() calls for the same index cannot finish before we
797          * actually flush the caches. This is achieved as the calls will look
798          * at the entry only under tree_lock and once they do that they will
799          * see the entry locked and wait for it to unlock.
800          */
801         radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
802         spin_unlock_irq(&mapping->tree_lock);
803
804         /*
805          * Even if dax_writeback_mapping_range() was given a wbc->range_start
806          * in the middle of a PMD, the 'index' we are given will be aligned to
807          * the start index of the PMD, as will the sector we pull from
808          * 'entry'.  This allows us to flush for PMD_SIZE and not have to
809          * worry about partial PMD writebacks.
810          */
811         dax.sector = dax_radix_sector(entry);
812         dax.size = PAGE_SIZE << dax_radix_order(entry);
813
814         /*
815          * We cannot hold tree_lock while calling dax_map_atomic() because it
816          * eventually calls cond_resched().
817          */
818         ret = dax_map_atomic(bdev, &dax);
819         if (ret < 0) {
820                 put_locked_mapping_entry(mapping, index, entry);
821                 return ret;
822         }
823
824         if (WARN_ON_ONCE(ret < dax.size)) {
825                 ret = -EIO;
826                 goto unmap;
827         }
828
829         dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
830         wb_cache_pmem(dax.addr, dax.size);
831         /*
832          * After we have flushed the cache, we can clear the dirty tag. There
833          * cannot be new dirty data in the pfn after the flush has completed as
834          * the pfn mappings are write-protected and faults wait for the mapping
835          * entry lock.
836          */
837         spin_lock_irq(&mapping->tree_lock);
838         radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
839         spin_unlock_irq(&mapping->tree_lock);
840  unmap:
841         dax_unmap_atomic(bdev, &dax);
842         put_locked_mapping_entry(mapping, index, entry);
843         return ret;
844
845  put_unlocked:
846         put_unlocked_mapping_entry(mapping, index, entry2);
847         spin_unlock_irq(&mapping->tree_lock);
848         return ret;
849 }
850
851 /*
852  * Flush the mapping to the persistent domain within the byte range of [start,
853  * end]. This is required by data integrity operations to ensure file data is
854  * on persistent storage prior to completion of the operation.
855  */
856 int dax_writeback_mapping_range(struct address_space *mapping,
857                 struct block_device *bdev, struct writeback_control *wbc)
858 {
859         struct inode *inode = mapping->host;
860         pgoff_t start_index, end_index;
861         pgoff_t indices[PAGEVEC_SIZE];
862         struct pagevec pvec;
863         bool done = false;
864         int i, ret = 0;
865
866         if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
867                 return -EIO;
868
869         if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
870                 return 0;
871
872         start_index = wbc->range_start >> PAGE_SHIFT;
873         end_index = wbc->range_end >> PAGE_SHIFT;
874
875         tag_pages_for_writeback(mapping, start_index, end_index);
876
877         pagevec_init(&pvec, 0);
878         while (!done) {
879                 pvec.nr = find_get_entries_tag(mapping, start_index,
880                                 PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
881                                 pvec.pages, indices);
882
883                 if (pvec.nr == 0)
884                         break;
885
886                 for (i = 0; i < pvec.nr; i++) {
887                         if (indices[i] > end_index) {
888                                 done = true;
889                                 break;
890                         }
891
892                         ret = dax_writeback_one(bdev, mapping, indices[i],
893                                         pvec.pages[i]);
894                         if (ret < 0)
895                                 return ret;
896                 }
897         }
898         return 0;
899 }
900 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
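
/*
 * Illustrative sketch (not part of this file): a filesystem with DAX inodes
 * would typically call dax_writeback_mapping_range() from its ->writepages()
 * method, passing the block device backing the mapping, roughly:
 *
 *	static int foo_dax_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 *
 * The "foo_" names above are hypothetical placeholders.
 */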
901
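/*
 * Install a PTE for the given sector: look up the pfn via dax_map_atomic(),
 * replace the radix tree entry with a locked DAX entry for that sector via
 * dax_insert_mapping_entry() and map the pfn into the faulting address with
 * vm_insert_mixed().
 */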
902 static int dax_insert_mapping(struct address_space *mapping,
903                 struct block_device *bdev, sector_t sector, size_t size,
904                 void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
905 {
906         unsigned long vaddr = vmf->address;
907         struct blk_dax_ctl dax = {
908                 .sector = sector,
909                 .size = size,
910         };
911         void *ret;
912         void *entry = *entryp;
913
914         if (dax_map_atomic(bdev, &dax) < 0)
915                 return PTR_ERR(dax.addr);
916         dax_unmap_atomic(bdev, &dax);
917
918         ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
919         if (IS_ERR(ret))
920                 return PTR_ERR(ret);
921         *entryp = ret;
922
923         return vm_insert_mixed(vma, vaddr, dax.pfn);
924 }
925
926 /**
927  * dax_pfn_mkwrite - handle first write to DAX page
928  * @vmf: The description of the fault
929  */
930 int dax_pfn_mkwrite(struct vm_fault *vmf)
931 {
932         struct file *file = vmf->vma->vm_file;
933         struct address_space *mapping = file->f_mapping;
934         void *entry, **slot;
935         pgoff_t index = vmf->pgoff;
936
937         spin_lock_irq(&mapping->tree_lock);
938         entry = get_unlocked_mapping_entry(mapping, index, &slot);
939         if (!entry || !radix_tree_exceptional_entry(entry)) {
940                 if (entry)
941                         put_unlocked_mapping_entry(mapping, index, entry);
942                 spin_unlock_irq(&mapping->tree_lock);
943                 return VM_FAULT_NOPAGE;
944         }
945         radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
946         entry = lock_slot(mapping, slot);
947         spin_unlock_irq(&mapping->tree_lock);
948         /*
949          * If we race with somebody updating the PTE and finish_mkwrite_fault()
950          * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
951          * the fault in either case.
952          */
953         finish_mkwrite_fault(vmf);
954         put_locked_mapping_entry(mapping, index, entry);
955         return VM_FAULT_NOPAGE;
956 }
957 EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
958
959 static bool dax_range_is_aligned(struct block_device *bdev,
960                                  unsigned int offset, unsigned int length)
961 {
962         unsigned short sector_size = bdev_logical_block_size(bdev);
963
964         if (!IS_ALIGNED(offset, sector_size))
965                 return false;
966         if (!IS_ALIGNED(length, sector_size))
967                 return false;
968
969         return true;
970 }
971
972 int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
973                 unsigned int offset, unsigned int length)
974 {
975         struct blk_dax_ctl dax = {
976                 .sector         = sector,
977                 .size           = PAGE_SIZE,
978         };
979
980         if (dax_range_is_aligned(bdev, offset, length)) {
981                 sector_t start_sector = dax.sector + (offset >> 9);
982
983                 return blkdev_issue_zeroout(bdev, start_sector,
984                                 length >> 9, GFP_NOFS, true);
985         } else {
986                 if (dax_map_atomic(bdev, &dax) < 0)
987                         return PTR_ERR(dax.addr);
988                 clear_pmem(dax.addr + offset, length);
989                 dax_unmap_atomic(bdev, &dax);
990         }
991         return 0;
992 }
993 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
994
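/* Translate a file position within @iomap into the 512-byte sector backing its page. */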
995 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
996 {
997         return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
998 }
999
1000 static loff_t
1001 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1002                 struct iomap *iomap)
1003 {
1004         struct iov_iter *iter = data;
1005         loff_t end = pos + length, done = 0;
1006         ssize_t ret = 0;
1007
1008         if (iov_iter_rw(iter) == READ) {
1009                 end = min(end, i_size_read(inode));
1010                 if (pos >= end)
1011                         return 0;
1012
1013                 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1014                         return iov_iter_zero(min(length, end - pos), iter);
1015         }
1016
1017         if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1018                 return -EIO;
1019
1020         /*
1021          * A write can allocate a block for an area which has a hole page mapped
1022          * into the page tables. We have to tear down these mappings so that data
1023          * written by write(2) is visible in mmap.
1024          */
1025         if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
1026                 invalidate_inode_pages2_range(inode->i_mapping,
1027                                               pos >> PAGE_SHIFT,
1028                                               (end - 1) >> PAGE_SHIFT);
1029         }
1030
1031         while (pos < end) {
1032                 unsigned offset = pos & (PAGE_SIZE - 1);
1033                 struct blk_dax_ctl dax = { 0 };
1034                 ssize_t map_len;
1035
1036                 if (fatal_signal_pending(current)) {
1037                         ret = -EINTR;
1038                         break;
1039                 }
1040
1041                 dax.sector = dax_iomap_sector(iomap, pos);
1042                 dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
1043                 map_len = dax_map_atomic(iomap->bdev, &dax);
1044                 if (map_len < 0) {
1045                         ret = map_len;
1046                         break;
1047                 }
1048
1049                 dax.addr += offset;
1050                 map_len -= offset;
1051                 if (map_len > end - pos)
1052                         map_len = end - pos;
1053
1054                 if (iov_iter_rw(iter) == WRITE)
1055                         map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
1056                 else
1057                         map_len = copy_to_iter(dax.addr, map_len, iter);
1058                 dax_unmap_atomic(iomap->bdev, &dax);
1059                 if (map_len <= 0) {
1060                         ret = map_len ? map_len : -EFAULT;
1061                         break;
1062                 }
1063
1064                 pos += map_len;
1065                 length -= map_len;
1066                 done += map_len;
1067         }
1068
1069         return done ? done : ret;
1070 }
1071
1072 /**
1073  * dax_iomap_rw - Perform I/O to a DAX file
1074  * @iocb:       The control block for this I/O
1075  * @iter:       The addresses to do I/O from or to
1076  * @ops:        iomap ops passed from the file system
1077  *
1078  * This function performs read and write operations to directly mapped
1079  * persistent memory.  The caller needs to take care of read/write exclusion
1080  * and evicting any page cache pages in the region under I/O.
1081  */
1082 ssize_t
1083 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1084                 const struct iomap_ops *ops)
1085 {
1086         struct address_space *mapping = iocb->ki_filp->f_mapping;
1087         struct inode *inode = mapping->host;
1088         loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1089         unsigned flags = 0;
1090
1091         if (iov_iter_rw(iter) == WRITE) {
1092                 lockdep_assert_held_exclusive(&inode->i_rwsem);
1093                 flags |= IOMAP_WRITE;
1094         } else {
1095                 lockdep_assert_held(&inode->i_rwsem);
1096         }
1097
1098         while (iov_iter_count(iter)) {
1099                 ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1100                                 iter, dax_iomap_actor);
1101                 if (ret <= 0)
1102                         break;
1103                 pos += ret;
1104                 done += ret;
1105         }
1106
1107         iocb->ki_pos += done;
1108         return done ? done : ret;
1109 }
1110 EXPORT_SYMBOL_GPL(dax_iomap_rw);
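
/*
 * Illustrative sketch (not part of this file): a filesystem's ->read_iter()
 * for a DAX inode usually just takes the inode lock shared and forwards to
 * dax_iomap_rw() with its iomap_ops, roughly:
 *
 *	static ssize_t foo_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &foo_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 *
 * The "foo_" names above are hypothetical placeholders.
 */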
1111
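/*
 * Translate a negative errno from the fault path into the corresponding
 * VM_FAULT_* return code.
 */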
1112 static int dax_fault_return(int error)
1113 {
1114         if (error == 0)
1115                 return VM_FAULT_NOPAGE;
1116         if (error == -ENOMEM)
1117                 return VM_FAULT_OOM;
1118         return VM_FAULT_SIGBUS;
1119 }
1120
1121 static int dax_iomap_pte_fault(struct vm_fault *vmf,
1122                                const struct iomap_ops *ops)
1123 {
1124         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1125         struct inode *inode = mapping->host;
1126         unsigned long vaddr = vmf->address;
1127         loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1128         sector_t sector;
1129         struct iomap iomap = { 0 };
1130         unsigned flags = IOMAP_FAULT;
1131         int error, major = 0;
1132         int vmf_ret = 0;
1133         void *entry;
1134
1135         /*
1136          * Check whether the offset isn't beyond the end of the file now. The
1137          * caller is supposed to hold locks serializing us with truncate / punch
1138          * hole, so this is a reliable test.
1139          */
1140         if (pos >= i_size_read(inode))
1141                 return VM_FAULT_SIGBUS;
1142
1143         if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
1144                 flags |= IOMAP_WRITE;
1145
1146         /*
1147          * Note that we don't bother to use iomap_apply here: DAX requires
1148          * the file system block size to be equal to the page size, which means
1149          * that we never have to deal with more than a single extent here.
1150          */
1151         error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
1152         if (error)
1153                 return dax_fault_return(error);
1154         if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1155                 vmf_ret = dax_fault_return(-EIO);       /* fs corruption? */
1156                 goto finish_iomap;
1157         }
1158
1159         entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
1160         if (IS_ERR(entry)) {
1161                 vmf_ret = dax_fault_return(PTR_ERR(entry));
1162                 goto finish_iomap;
1163         }
1164
1165         sector = dax_iomap_sector(&iomap, pos);
1166
1167         if (vmf->cow_page) {
1168                 switch (iomap.type) {
1169                 case IOMAP_HOLE:
1170                 case IOMAP_UNWRITTEN:
1171                         clear_user_highpage(vmf->cow_page, vaddr);
1172                         break;
1173                 case IOMAP_MAPPED:
1174                         error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
1175                                         vmf->cow_page, vaddr);
1176                         break;
1177                 default:
1178                         WARN_ON_ONCE(1);
1179                         error = -EIO;
1180                         break;
1181                 }
1182
1183                 if (error)
1184                         goto error_unlock_entry;
1185
1186                 __SetPageUptodate(vmf->cow_page);
1187                 vmf_ret = finish_fault(vmf);
1188                 if (!vmf_ret)
1189                         vmf_ret = VM_FAULT_DONE_COW;
1190                 goto unlock_entry;
1191         }
1192
1193         switch (iomap.type) {
1194         case IOMAP_MAPPED:
1195                 if (iomap.flags & IOMAP_F_NEW) {
1196                         count_vm_event(PGMAJFAULT);
1197                         mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
1198                         major = VM_FAULT_MAJOR;
1199                 }
1200                 error = dax_insert_mapping(mapping, iomap.bdev, sector,
1201                                 PAGE_SIZE, &entry, vmf->vma, vmf);
1202                 /* -EBUSY is fine, somebody else faulted on the same PTE */
1203                 if (error == -EBUSY)
1204                         error = 0;
1205                 break;
1206         case IOMAP_UNWRITTEN:
1207         case IOMAP_HOLE:
1208                 if (!(vmf->flags & FAULT_FLAG_WRITE)) {
1209                         vmf_ret = dax_load_hole(mapping, &entry, vmf);
1210                         goto unlock_entry;
1211                 }
1212                 /*FALLTHRU*/
1213         default:
1214                 WARN_ON_ONCE(1);
1215                 error = -EIO;
1216                 break;
1217         }
1218
1219  error_unlock_entry:
1220         vmf_ret = dax_fault_return(error) | major;
1221  unlock_entry:
1222         put_locked_mapping_entry(mapping, vmf->pgoff, entry);
1223  finish_iomap:
1224         if (ops->iomap_end) {
1225                 int copied = PAGE_SIZE;
1226
1227                 if (vmf_ret & VM_FAULT_ERROR)
1228                         copied = 0;
1229                 /*
1230                  * The fault is done by now and there's no way back (another
1231                  * thread may already be happily using the PTE we have installed).
1232                  * Just ignore error from ->iomap_end since we cannot do much
1233                  * with it.
1234                  */
1235                 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1236         }
1237         return vmf_ret;
1238 }
1239
1240 #ifdef CONFIG_FS_DAX_PMD
1241 /*
1242  * The 'colour' (i.e. the low bits) within a PMD of a page offset.  This comes up
1243  * more often than one might expect in the below functions.
1244  */
1245 #define PG_PMD_COLOUR   ((PMD_SIZE >> PAGE_SHIFT) - 1)
1246
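/*
 * Map a whole PMD worth of storage for the faulting address: the block range
 * must be at least PMD_SIZE long, naturally aligned and backed by a devmap
 * pfn, otherwise we fall back to PTE sized faults.
 */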
1247 static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
1248                 loff_t pos, void **entryp)
1249 {
1250         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1251         struct block_device *bdev = iomap->bdev;
1252         struct inode *inode = mapping->host;
1253         struct blk_dax_ctl dax = {
1254                 .sector = dax_iomap_sector(iomap, pos),
1255                 .size = PMD_SIZE,
1256         };
1257         long length = dax_map_atomic(bdev, &dax);
1258         void *ret = NULL;
1259
1260         if (length < 0) /* dax_map_atomic() failed */
1261                 goto fallback;
1262         if (length < PMD_SIZE)
1263                 goto unmap_fallback;
1264         if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
1265                 goto unmap_fallback;
1266         if (!pfn_t_devmap(dax.pfn))
1267                 goto unmap_fallback;
1268
1269         dax_unmap_atomic(bdev, &dax);
1270
1271         ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
1272                         RADIX_DAX_PMD);
1273         if (IS_ERR(ret))
1274                 goto fallback;
1275         *entryp = ret;
1276
1277         trace_dax_pmd_insert_mapping(inode, vmf, length, dax.pfn, ret);
1278         return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
1279                         dax.pfn, vmf->flags & FAULT_FLAG_WRITE);
1280
1281  unmap_fallback:
1282         dax_unmap_atomic(bdev, &dax);
1283 fallback:
1284         trace_dax_pmd_insert_mapping_fallback(inode, vmf, length,
1285                         dax.pfn, ret);
1286         return VM_FAULT_FALLBACK;
1287 }
1288
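/*
 * A read fault over a hole: map the shared huge zero page read-only and
 * record a RADIX_DAX_PMD | RADIX_DAX_HZP entry in the radix tree, falling
 * back to PTE faults if anything goes wrong.
 */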
1289 static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
1290                 void **entryp)
1291 {
1292         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1293         unsigned long pmd_addr = vmf->address & PMD_MASK;
1294         struct inode *inode = mapping->host;
1295         struct page *zero_page;
1296         void *ret = NULL;
1297         spinlock_t *ptl;
1298         pmd_t pmd_entry;
1299
1300         zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1301
1302         if (unlikely(!zero_page))
1303                 goto fallback;
1304
1305         ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
1306                         RADIX_DAX_PMD | RADIX_DAX_HZP);
1307         if (IS_ERR(ret))
1308                 goto fallback;
1309         *entryp = ret;
1310
1311         ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1312         if (!pmd_none(*(vmf->pmd))) {
1313                 spin_unlock(ptl);
1314                 goto fallback;
1315         }
1316
1317         pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1318         pmd_entry = pmd_mkhuge(pmd_entry);
1319         set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1320         spin_unlock(ptl);
1321         trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
1322         return VM_FAULT_NOPAGE;
1323
1324 fallback:
1325         trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
1326         return VM_FAULT_FALLBACK;
1327 }
1328
1329 static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1330                                const struct iomap_ops *ops)
1331 {
1332         struct vm_area_struct *vma = vmf->vma;
1333         struct address_space *mapping = vma->vm_file->f_mapping;
1334         unsigned long pmd_addr = vmf->address & PMD_MASK;
1335         bool write = vmf->flags & FAULT_FLAG_WRITE;
1336         unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1337         struct inode *inode = mapping->host;
1338         int result = VM_FAULT_FALLBACK;
1339         struct iomap iomap = { 0 };
1340         pgoff_t max_pgoff, pgoff;
1341         void *entry;
1342         loff_t pos;
1343         int error;
1344
1345         /*
1346          * Check whether the offset isn't beyond the end of the file now. The
1347          * caller is supposed to hold locks serializing us with truncate / punch
1348          * hole, so this is a reliable test.
1349          */
1350         pgoff = linear_page_index(vma, pmd_addr);
1351         max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;
1352
1353         trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1354
1355         /* Fall back to PTEs if we're going to COW */
1356         if (write && !(vma->vm_flags & VM_SHARED))
1357                 goto fallback;
1358
1359         /* If the PMD would extend outside the VMA */
1360         if (pmd_addr < vma->vm_start)
1361                 goto fallback;
1362         if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1363                 goto fallback;
1364
1365         if (pgoff > max_pgoff) {
1366                 result = VM_FAULT_SIGBUS;
1367                 goto out;
1368         }
1369
1370         /* If the PMD would extend beyond the file size */
1371         if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
1372                 goto fallback;
1373
1374         /*
1375          * Note that we don't use iomap_apply here.  We aren't doing I/O, only
1376          * setting up a mapping, so really we're using iomap_begin() as a way
1377          * to look up our filesystem block.
1378          */
1379         pos = (loff_t)pgoff << PAGE_SHIFT;
1380         error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
1381         if (error)
1382                 goto fallback;
1383
1384         if (iomap.offset + iomap.length < pos + PMD_SIZE)
1385                 goto finish_iomap;
1386
1387         /*
1388          * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
1389          * PMD or a HZP entry.  If it can't (because a 4k page is already in
1390          * the tree, for instance), it will return -EEXIST and we just fall
1391          * back to 4k entries.
1392          */
1393         entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
1394         if (IS_ERR(entry))
1395                 goto finish_iomap;
1396
1397         switch (iomap.type) {
1398         case IOMAP_MAPPED:
1399                 result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
1400                 break;
1401         case IOMAP_UNWRITTEN:
1402         case IOMAP_HOLE:
1403                 if (WARN_ON_ONCE(write))
1404                         goto unlock_entry;
1405                 result = dax_pmd_load_hole(vmf, &iomap, &entry);
1406                 break;
1407         default:
1408                 WARN_ON_ONCE(1);
1409                 break;
1410         }
1411
1412  unlock_entry:
1413         put_locked_mapping_entry(mapping, pgoff, entry);
1414  finish_iomap:
1415         if (ops->iomap_end) {
1416                 int copied = PMD_SIZE;
1417
1418                 if (result == VM_FAULT_FALLBACK)
1419                         copied = 0;
1420                 /*
1421                  * The fault is done by now and there's no way back (another
1422                  * thread may already be happily using the PMD we have installed).
1423                  * Just ignore error from ->iomap_end since we cannot do much
1424                  * with it.
1425                  */
1426                 ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1427                                 &iomap);
1428         }
1429  fallback:
1430         if (result == VM_FAULT_FALLBACK) {
1431                 split_huge_pmd(vma, vmf->pmd, vmf->address);
1432                 count_vm_event(THP_FAULT_FALLBACK);
1433         }
1434 out:
1435         trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1436         return result;
1437 }
1438 #else
1439 static int dax_iomap_pmd_fault(struct vm_fault *vmf,
1440                                const struct iomap_ops *ops)
1441 {
1442         return VM_FAULT_FALLBACK;
1443 }
1444 #endif /* CONFIG_FS_DAX_PMD */
1445
1446 /**
1447  * dax_iomap_fault - handle a page fault on a DAX file
1448  * @vmf: The description of the fault
1449  * @ops: iomap ops passed from the file system
1450  *
1451  * When a page fault occurs, filesystems may call this helper in
1452  * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1453  * has done all the necessary locking for the page fault to proceed
1454  * successfully.
1455  */
1456 int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1457                     const struct iomap_ops *ops)
1458 {
1459         switch (pe_size) {
1460         case PE_SIZE_PTE:
1461                 return dax_iomap_pte_fault(vmf, ops);
1462         case PE_SIZE_PMD:
1463                 return dax_iomap_pmd_fault(vmf, ops);
1464         default:
1465                 return VM_FAULT_FALLBACK;
1466         }
1467 }
1468 EXPORT_SYMBOL_GPL(dax_iomap_fault);
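
/*
 * Illustrative sketch (not part of this file): a filesystem wires this up
 * from its vm_operations_struct fault handlers, taking whatever lock it uses
 * to serialize faults against truncate before calling in, roughly:
 *
 *	static int foo_dax_huge_fault(struct vm_fault *vmf,
 *				      enum page_entry_size pe_size)
 *	{
 *		int ret;
 *
 *		down_read(&foo_dax_sem);
 *		ret = dax_iomap_fault(vmf, pe_size, &foo_iomap_ops);
 *		up_read(&foo_dax_sem);
 *		return ret;
 *	}
 *
 * The "foo_" names above are hypothetical placeholders.
 */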