/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
                if (local->id == id)
                        goto out;
        local = NULL;
out:
        dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
        return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        spin_lock(&pnfs_spinlock);
        local = find_pnfs_driver_locked(id);
        if (local != NULL && !try_module_get(local->owner)) {
                dprintk("%s: Could not grab reference on module\n", __func__);
                local = NULL;
        }
        spin_unlock(&pnfs_spinlock);
        return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
        if (nfss->pnfs_curr_ld) {
                if (nfss->pnfs_curr_ld->clear_layoutdriver)
                        nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
                /* Decrement the MDS count. Purge the deviceid cache if zero */
                if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
                        nfs4_deviceid_purge_client(nfss->nfs_client);
                module_put(nfss->pnfs_curr_ld->owner);
        }
        nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
                      u32 id)
{
        struct pnfs_layoutdriver_type *ld_type = NULL;

        if (id == 0)
                goto out_no_driver;
        if (!(server->nfs_client->cl_exchange_flags &
                 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
                printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
                        __func__, id, server->nfs_client->cl_exchange_flags);
                goto out_no_driver;
        }
        ld_type = find_pnfs_driver(id);
        if (!ld_type) {
                request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
                ld_type = find_pnfs_driver(id);
                if (!ld_type) {
                        dprintk("%s: No pNFS module found for %u.\n",
                                __func__, id);
                        goto out_no_driver;
                }
        }
        server->pnfs_curr_ld = ld_type;
        if (ld_type->set_layoutdriver
            && ld_type->set_layoutdriver(server, mntfh)) {
                printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
                        "driver %u.\n", __func__, id);
                module_put(ld_type->owner);
                goto out_no_driver;
        }
        /* Bump the MDS count */
        atomic_inc(&server->nfs_client->cl_mds_count);

        dprintk("%s: pNFS module for %u set\n", __func__, id);
        return;

out_no_driver:
        dprintk("%s: Using NFSv4 I/O\n", __func__);
        server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        int status = -EINVAL;
        struct pnfs_layoutdriver_type *tmp;

        if (ld_type->id == 0) {
                printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
                return status;
        }
        if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
                printk(KERN_ERR "NFS: %s Layout driver must provide "
                       "alloc_lseg and free_lseg.\n", __func__);
                return status;
        }

        spin_lock(&pnfs_spinlock);
        tmp = find_pnfs_driver_locked(ld_type->id);
        if (!tmp) {
                list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
                status = 0;
                dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
                        ld_type->name);
        } else {
                printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
                        __func__, ld_type->id);
        }
        spin_unlock(&pnfs_spinlock);

        return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
        spin_lock(&pnfs_spinlock);
        list_del(&ld_type->pnfs_tblid);
        spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
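
/*
 * Usage sketch (illustrative only; the mylayout_* names are hypothetical):
 * a layout driver typically registers itself from its module init hook and
 * unregisters on exit, providing at least the mandatory alloc_lseg and
 * free_lseg callbacks checked above, e.g.
 *
 *      static struct pnfs_layoutdriver_type mylayout_type = {
 *              .id         = LAYOUT_NFSV4_1_FILES,
 *              .name       = "LAYOUT_NFSV4_1_FILES",
 *              .owner      = THIS_MODULE,
 *              .alloc_lseg = mylayout_alloc_lseg,
 *              .free_lseg  = mylayout_free_lseg,
 *      };
 *
 *      static int __init mylayout_init(void)
 *      {
 *              return pnfs_register_layoutdriver(&mylayout_type);
 *      }
 *
 *      static void __exit mylayout_exit(void)
 *      {
 *              pnfs_unregister_layoutdriver(&mylayout_type);
 *      }
 *
 * See the in-tree file layout driver for a real implementation.
 */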

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
        atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
        return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct nfs_server *server = NFS_SERVER(lo->plh_inode);
        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

        if (!list_empty(&lo->plh_layouts)) {
                struct nfs_client *clp = server->nfs_client;

                spin_lock(&clp->cl_lock);
                list_del_init(&lo->plh_layouts);
                spin_unlock(&clp->cl_lock);
        }
        put_rpccred(lo->plh_lc_cred);
        return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
        dprintk("%s: freeing layout cache %p\n", __func__, lo);
        nfsi->layout = NULL;
        /* Reset MDS Threshold I/O counters */
        nfsi->write_io = 0;
        nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct inode *inode = lo->plh_inode;

        if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
                pnfs_detach_layout_hdr(lo);
                spin_unlock(&inode->i_lock);
                pnfs_free_layout_hdr(lo);
        }
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
        return iomode == IOMODE_RW ?
                NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
        lo->plh_retry_timestamp = jiffies;
        if (!test_and_set_bit(fail_bit, &lo->plh_flags))
                atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
        if (test_and_clear_bit(fail_bit, &lo->plh_flags))
                atomic_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
        struct inode *inode = lo->plh_inode;
        struct pnfs_layout_range range = {
                .iomode = iomode,
                .offset = 0,
                .length = NFS4_MAX_UINT64,
        };
        LIST_HEAD(head);

        spin_lock(&inode->i_lock);
        pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
        pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
        spin_unlock(&inode->i_lock);
        pnfs_free_lseg_list(&head);
        dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
                        iomode == IOMODE_RW ? "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
        unsigned long start, end;
        int fail_bit = pnfs_iomode_to_fail_bit(iomode);

        if (test_bit(fail_bit, &lo->plh_flags) == 0)
                return false;
        end = jiffies;
        start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
        if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
                /* It is time to retry the failed layoutgets */
                pnfs_layout_clear_fail_bit(lo, fail_bit);
                return false;
        }
        return true;
}

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
        INIT_LIST_HEAD(&lseg->pls_list);
        INIT_LIST_HEAD(&lseg->pls_lc_list);
        atomic_set(&lseg->pls_refcount, 1);
        smp_mb();
        set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
        lseg->pls_layout = lo;
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
        struct inode *ino = lseg->pls_layout->plh_inode;

        NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
                struct pnfs_layout_segment *lseg)
{
        struct inode *inode = lo->plh_inode;

        WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
        list_del_init(&lseg->pls_list);
        /* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
        atomic_dec(&lo->plh_refcount);
        if (list_empty(&lo->plh_segs))
                clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
        rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
        struct pnfs_layout_hdr *lo;
        struct inode *inode;

        if (!lseg)
                return;

        dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
                atomic_read(&lseg->pls_refcount),
                test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
        lo = lseg->pls_layout;
        inode = lo->plh_inode;
        if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
                pnfs_get_layout_hdr(lo);
                pnfs_layout_remove_lseg(lo, lseg);
                spin_unlock(&inode->i_lock);
                pnfs_free_lseg(lseg);
                pnfs_put_layout_hdr(lo);
        }
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

static void pnfs_put_lseg_async_work(struct work_struct *work)
{
        struct pnfs_layout_segment *lseg;

        lseg = container_of(work, struct pnfs_layout_segment, pls_work);

        pnfs_put_lseg(lseg);
}

void
pnfs_put_lseg_async(struct pnfs_layout_segment *lseg)
{
        INIT_WORK(&lseg->pls_work, pnfs_put_lseg_async_work);
        schedule_work(&lseg->pls_work);
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg_async);

static u64
end_offset(u64 start, u64 len)
{
        u64 end;

        end = start + len;
        return end >= start ? end : NFS4_MAX_UINT64;
}
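
/*
 * Worked example (illustrative): end_offset(4096, NFS4_MAX_UINT64) wraps
 * around, so the sum is smaller than 'start' and the result is clamped to
 * NFS4_MAX_UINT64, which the range helpers below treat as "to end of file".
 */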

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
                 const struct pnfs_layout_range *l2)
{
        u64 start1 = l1->offset;
        u64 end1 = end_offset(start1, l1->length);
        u64 start2 = l2->offset;
        u64 end2 = end_offset(start2, l2->length);

        return (start1 <= start2) && (end1 >= end2);
}

/*
 * do l1 and l2 intersect?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static bool
pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
                    const struct pnfs_layout_range *l2)
{
        u64 start1 = l1->offset;
        u64 end1 = end_offset(start1, l1->length);
        u64 start2 = l2->offset;
        u64 end2 = end_offset(start2, l2->length);

        return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
               (end2 == NFS4_MAX_UINT64 || end2 > start1);
}

static bool
should_free_lseg(const struct pnfs_layout_range *lseg_range,
                 const struct pnfs_layout_range *recall_range)
{
        return (recall_range->iomode == IOMODE_ANY ||
                lseg_range->iomode == recall_range->iomode) &&
               pnfs_lseg_range_intersecting(lseg_range, recall_range);
}

static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
                struct list_head *tmp_list)
{
        if (!atomic_dec_and_test(&lseg->pls_refcount))
                return false;
        pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
        list_add(&lseg->pls_list, tmp_list);
        return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
                             struct list_head *tmp_list)
{
        int rv = 0;

        if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
                /* Remove the reference keeping the lseg in the
                 * list.  It will now be removed when all
                 * outstanding io is finished.
                 */
                dprintk("%s: lseg %p ref %d\n", __func__, lseg,
                        atomic_read(&lseg->pls_refcount));
                if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
                        rv = 1;
        }
        return rv;
}

/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
                            struct list_head *tmp_list,
                            struct pnfs_layout_range *recall_range)
{
        struct pnfs_layout_segment *lseg, *next;
        int invalid = 0, removed = 0;

        dprintk("%s:Begin lo %p\n", __func__, lo);

        if (list_empty(&lo->plh_segs))
                return 0;
        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
                if (!recall_range ||
                    should_free_lseg(&lseg->pls_range, recall_range)) {
                        dprintk("%s: freeing lseg %p iomode %d "
                                "offset %llu length %llu\n", __func__,
                                lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
                                lseg->pls_range.length);
                        invalid++;
                        removed += mark_lseg_invalid(lseg, tmp_list);
                }
        dprintk("%s:Return %i\n", __func__, invalid - removed);
        return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
        struct pnfs_layout_segment *lseg, *tmp;

        if (list_empty(free_me))
                return;

        list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
                list_del(&lseg->pls_list);
                pnfs_free_lseg(lseg);
        }
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
        struct pnfs_layout_hdr *lo;
        LIST_HEAD(tmp_list);

        spin_lock(&nfsi->vfs_inode.i_lock);
        lo = nfsi->layout;
        if (lo) {
                lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
                pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
                pnfs_get_layout_hdr(lo);
                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
                pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
                spin_unlock(&nfsi->vfs_inode.i_lock);
                pnfs_free_lseg_list(&tmp_list);
                pnfs_put_layout_hdr(lo);
        } else
                spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
                struct list_head *layout_list)
{
        struct pnfs_layout_hdr *lo;
        bool ret = false;

        spin_lock(&inode->i_lock);
        lo = NFS_I(inode)->layout;
        if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
                pnfs_get_layout_hdr(lo);
                list_add(&lo->plh_bulk_destroy, layout_list);
                ret = true;
        }
        spin_unlock(&inode->i_lock);
        return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
                struct nfs_server *server,
                struct list_head *layout_list)
{
        struct pnfs_layout_hdr *lo, *next;
        struct inode *inode;

        list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
                inode = igrab(lo->plh_inode);
                if (inode == NULL)
                        continue;
                list_del_init(&lo->plh_layouts);
                if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
                        continue;
                rcu_read_unlock();
                spin_unlock(&clp->cl_lock);
                iput(inode);
                spin_lock(&clp->cl_lock);
                rcu_read_lock();
                return -EAGAIN;
        }
        return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
                bool is_bulk_recall)
{
        struct pnfs_layout_hdr *lo;
        struct inode *inode;
        struct pnfs_layout_range range = {
                .iomode = IOMODE_ANY,
                .offset = 0,
                .length = NFS4_MAX_UINT64,
        };
        LIST_HEAD(lseg_list);
        int ret = 0;

        while (!list_empty(layout_list)) {
                lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
                                plh_bulk_destroy);
                dprintk("%s freeing layout for inode %lu\n", __func__,
                        lo->plh_inode->i_ino);
                inode = lo->plh_inode;
                spin_lock(&inode->i_lock);
                list_del_init(&lo->plh_bulk_destroy);
                lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
                if (is_bulk_recall)
                        set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
                if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
                        ret = -EAGAIN;
                spin_unlock(&inode->i_lock);
                pnfs_free_lseg_list(&lseg_list);
                pnfs_put_layout_hdr(lo);
                iput(inode);
        }
        return ret;
}

int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
                struct nfs_fsid *fsid,
                bool is_recall)
{
        struct nfs_server *server;
        LIST_HEAD(layout_list);

        spin_lock(&clp->cl_lock);
        rcu_read_lock();
restart:
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
                        continue;
                if (pnfs_layout_bulk_destroy_byserver_locked(clp,
                                server,
                                &layout_list) != 0)
                        goto restart;
        }
        rcu_read_unlock();
        spin_unlock(&clp->cl_lock);

        if (list_empty(&layout_list))
                return 0;
        return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
                bool is_recall)
{
        struct nfs_server *server;
        LIST_HEAD(layout_list);

        spin_lock(&clp->cl_lock);
        rcu_read_lock();
restart:
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                if (pnfs_layout_bulk_destroy_byserver_locked(clp,
                                        server,
                                        &layout_list) != 0)
                        goto restart;
        }
        rcu_read_unlock();
        spin_unlock(&clp->cl_lock);

        if (list_empty(&layout_list))
                return 0;
        return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
        nfs4_deviceid_mark_client_invalid(clp);
        nfs4_deviceid_purge_client(clp);

        pnfs_destroy_layouts_byclid(clp, false);
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
        return (s32)(s1 - s2) > 0;
}
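
/*
 * Worked example (illustrative): with seqids that have wrapped, s1 = 2
 * and s2 = 0xfffffffe give (s32)(s1 - s2) = 4 > 0, so s1 is correctly
 * treated as newer even though it is numerically smaller.
 */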

/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
                        bool update_barrier)
{
        u32 oldseq, newseq, new_barrier;
        int empty = list_empty(&lo->plh_segs);

        oldseq = be32_to_cpu(lo->plh_stateid.seqid);
        newseq = be32_to_cpu(new->seqid);
        if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
                nfs4_stateid_copy(&lo->plh_stateid, new);
                if (update_barrier) {
                        new_barrier = be32_to_cpu(new->seqid);
                } else {
                        /* Because of wraparound, we want to keep the barrier
                         * "close" to the current seqids.
                         */
                        new_barrier = newseq - atomic_read(&lo->plh_outstanding);
                }
                if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
                        lo->plh_barrier = new_barrier;
        }
}
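
/*
 * Worked example (illustrative): if the stateid just seen carries seqid 10
 * and two LAYOUTGETs are still outstanding, the barrier becomes 8; replies
 * carrying a seqid of 8 or older are then treated as stale by
 * pnfs_layout_stateid_blocked() below.
 */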

static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
                const nfs4_stateid *stateid)
{
        u32 seqid = be32_to_cpu(stateid->seqid);

        return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, int lget)
{
        return lo->plh_block_lgets ||
                test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
                (list_empty(&lo->plh_segs) &&
                 (atomic_read(&lo->plh_outstanding) > lget));
}

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
                              struct nfs4_state *open_state)
{
        int status = 0;

        dprintk("--> %s\n", __func__);
        spin_lock(&lo->plh_inode->i_lock);
        if (pnfs_layoutgets_blocked(lo, 1)) {
                status = -EAGAIN;
        } else if (!nfs4_valid_open_stateid(open_state)) {
                status = -EBADF;
        } else if (list_empty(&lo->plh_segs)) {
                int seq;

                do {
                        seq = read_seqbegin(&open_state->seqlock);
                        nfs4_stateid_copy(dst, &open_state->stateid);
                } while (read_seqretry(&open_state->seqlock, seq));
        } else
                nfs4_stateid_copy(dst, &lo->plh_stateid);
        spin_unlock(&lo->plh_inode->i_lock);
        dprintk("<-- %s\n", __func__);
        return status;
}

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
           struct nfs_open_context *ctx,
           struct pnfs_layout_range *range,
           gfp_t gfp_flags)
{
        struct inode *ino = lo->plh_inode;
        struct nfs_server *server = NFS_SERVER(ino);
        struct nfs4_layoutget *lgp;
        struct pnfs_layout_segment *lseg;

        dprintk("--> %s\n", __func__);

        lgp = kzalloc(sizeof(*lgp), gfp_flags);
        if (lgp == NULL)
                return NULL;

        lgp->args.minlength = PAGE_CACHE_SIZE;
        if (lgp->args.minlength > range->length)
                lgp->args.minlength = range->length;
        lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
        lgp->args.range = *range;
        lgp->args.type = server->pnfs_curr_ld->id;
        lgp->args.inode = ino;
        lgp->args.ctx = get_nfs_open_context(ctx);
        lgp->gfp_flags = gfp_flags;
        lgp->cred = lo->plh_lc_cred;

        /* Synchronously retrieve layout information from server and
         * store in lseg.
         */
        lseg = nfs4_proc_layoutget(lgp, gfp_flags);
        if (IS_ERR(lseg)) {
                switch (PTR_ERR(lseg)) {
                case -ENOMEM:
                case -ERESTARTSYS:
                        break;
                default:
                        /* remember that LAYOUTGET failed and suspend trying */
                        pnfs_layout_io_set_failed(lo, range->iomode);
                }
                return NULL;
        }

        return lseg;
}

static void pnfs_clear_layoutcommit(struct inode *inode,
                struct list_head *head)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct pnfs_layout_segment *lseg, *tmp;

        if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
                return;
        list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
                if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
                        continue;
                pnfs_lseg_dec_and_remove_zero(lseg, head);
        }
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
        struct pnfs_layout_hdr *lo = NULL;
        struct nfs_inode *nfsi = NFS_I(ino);
        LIST_HEAD(tmp_list);
        struct nfs4_layoutreturn *lrp;
        nfs4_stateid stateid;
        int status = 0, empty;

        dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

        spin_lock(&ino->i_lock);
        lo = nfsi->layout;
        if (!lo) {
                spin_unlock(&ino->i_lock);
                dprintk("NFS: %s no layout to return\n", __func__);
                goto out;
        }
        stateid = nfsi->layout->plh_stateid;
        /* Reference matched in nfs4_layoutreturn_release */
        pnfs_get_layout_hdr(lo);
        empty = list_empty(&lo->plh_segs);
        pnfs_clear_layoutcommit(ino, &tmp_list);
        pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
        /* Don't send a LAYOUTRETURN if list was initially empty */
        if (empty) {
                spin_unlock(&ino->i_lock);
                pnfs_put_layout_hdr(lo);
                dprintk("NFS: %s no layout segments to return\n", __func__);
                goto out;
        }
        lo->plh_block_lgets++;
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&tmp_list);

        lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
        if (unlikely(lrp == NULL)) {
                status = -ENOMEM;
                spin_lock(&ino->i_lock);
                lo->plh_block_lgets--;
                spin_unlock(&ino->i_lock);
                pnfs_put_layout_hdr(lo);
                goto out;
        }

        lrp->args.stateid = stateid;
        lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
        lrp->args.inode = ino;
        lrp->args.layout = lo;
        lrp->clp = NFS_SERVER(ino)->nfs_client;
        lrp->cred = lo->plh_lc_cred;

        status = nfs4_proc_layoutreturn(lrp);
out:
        dprintk("<-- %s status: %d\n", __func__, status);
        return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);

int
pnfs_commit_and_return_layout(struct inode *inode)
{
        struct pnfs_layout_hdr *lo;
        int ret;

        spin_lock(&inode->i_lock);
        lo = NFS_I(inode)->layout;
        if (lo == NULL) {
                spin_unlock(&inode->i_lock);
                return 0;
        }
        pnfs_get_layout_hdr(lo);
        /* Block new layoutgets and read/write to ds */
        lo->plh_block_lgets++;
        spin_unlock(&inode->i_lock);
        filemap_fdatawait(inode->i_mapping);
        ret = pnfs_layoutcommit_inode(inode, true);
        if (ret == 0)
                ret = _pnfs_return_layout(inode);
        spin_lock(&inode->i_lock);
        lo->plh_block_lgets--;
        spin_unlock(&inode->i_lock);
        pnfs_put_layout_hdr(lo);
        return ret;
}

bool pnfs_roc(struct inode *ino)
{
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg, *tmp;
        LIST_HEAD(tmp_list);
        bool found = false;

        spin_lock(&ino->i_lock);
        lo = NFS_I(ino)->layout;
        if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
            test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
                goto out_nolayout;
        list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
                if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
                        mark_lseg_invalid(lseg, &tmp_list);
                        found = true;
                }
        if (!found)
                goto out_nolayout;
        lo->plh_block_lgets++;
        pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&tmp_list);
        return true;

out_nolayout:
        spin_unlock(&ino->i_lock);
        return false;
}

void pnfs_roc_release(struct inode *ino)
{
        struct pnfs_layout_hdr *lo;

        spin_lock(&ino->i_lock);
        lo = NFS_I(ino)->layout;
        lo->plh_block_lgets--;
        if (atomic_dec_and_test(&lo->plh_refcount)) {
                pnfs_detach_layout_hdr(lo);
                spin_unlock(&ino->i_lock);
                pnfs_free_layout_hdr(lo);
        } else
                spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
        struct pnfs_layout_hdr *lo;

        spin_lock(&ino->i_lock);
        lo = NFS_I(ino)->layout;
        if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
                lo->plh_barrier = barrier;
        spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg;
        u32 current_seqid;
        bool found = false;

        spin_lock(&ino->i_lock);
        list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
                if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
                        rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
                        found = true;
                        goto out;
                }
        lo = nfsi->layout;
        current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

        /* Since close does not return a layout stateid for use as
         * a barrier, we choose the worst-case barrier.
         */
        *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
out:
        spin_unlock(&ino->i_lock);
        return found;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
           const struct pnfs_layout_range *l2)
{
        s64 d;

        /* high offset > low offset */
        d = l1->offset - l2->offset;
        if (d)
                return d;

        /* short length > long length */
        d = l2->length - l1->length;
        if (d)
                return d;

        /* read > read/write */
        return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
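
/*
 * Example ordering (illustrative): for two segments with equal offset and
 * length, the RW segment sorts before the READ one (READ compares greater
 * above), so pnfs_find_lseg() encounters RW layouts first.
 */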

static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
                   struct pnfs_layout_segment *lseg)
{
        struct pnfs_layout_segment *lp;

        dprintk("%s:Begin\n", __func__);

        list_for_each_entry(lp, &lo->plh_segs, pls_list) {
                if (pnfs_lseg_range_cmp(&lseg->pls_range, &lp->pls_range) > 0)
                        continue;
                list_add_tail(&lseg->pls_list, &lp->pls_list);
                dprintk("%s: inserted lseg %p "
                        "iomode %d offset %llu length %llu before "
                        "lp %p iomode %d offset %llu length %llu\n",
                        __func__, lseg, lseg->pls_range.iomode,
                        lseg->pls_range.offset, lseg->pls_range.length,
                        lp, lp->pls_range.iomode, lp->pls_range.offset,
                        lp->pls_range.length);
                goto out;
        }
        list_add_tail(&lseg->pls_list, &lo->plh_segs);
        dprintk("%s: inserted lseg %p "
                "iomode %d offset %llu length %llu at tail\n",
                __func__, lseg, lseg->pls_range.iomode,
                lseg->pls_range.offset, lseg->pls_range.length);
out:
        pnfs_get_layout_hdr(lo);

        dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
                      struct nfs_open_context *ctx,
                      gfp_t gfp_flags)
{
        struct pnfs_layout_hdr *lo;

        lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
        if (!lo)
                return NULL;
        atomic_set(&lo->plh_refcount, 1);
        INIT_LIST_HEAD(&lo->plh_layouts);
        INIT_LIST_HEAD(&lo->plh_segs);
        INIT_LIST_HEAD(&lo->plh_bulk_destroy);
        lo->plh_inode = ino;
        lo->plh_lc_cred = get_rpccred(ctx->cred);
        return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
                       struct nfs_open_context *ctx,
                       gfp_t gfp_flags)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_hdr *new = NULL;

        dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

        if (nfsi->layout != NULL)
                goto out_existing;
        spin_unlock(&ino->i_lock);
        new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
        spin_lock(&ino->i_lock);

        if (likely(nfsi->layout == NULL)) {     /* Won the race? */
                nfsi->layout = new;
                return new;
        } else if (new != NULL)
                pnfs_free_layout_hdr(new);
out_existing:
        pnfs_get_layout_hdr(nfsi->layout);
        return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode       lseg    match
 * -----        -----   -----
 * ANY          READ    true
 * ANY          RW      true
 * RW           READ    false
 * RW           RW      true
 * READ         READ    true
 * READ         RW      true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
                 const struct pnfs_layout_range *range)
{
        struct pnfs_layout_range range1;

        if ((range->iomode == IOMODE_RW &&
             ls_range->iomode != IOMODE_RW) ||
            !pnfs_lseg_range_intersecting(ls_range, range))
                return 0;

        /* range1 covers only the first byte in the range */
        range1 = *range;
        range1.length = 1;
        return pnfs_lseg_range_contained(ls_range, &range1);
}
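
/*
 * Worked example (illustrative): a READ request for bytes [100, 200)
 * matches an lseg covering [0, 150): the ranges intersect and the segment
 * contains the first requested byte, even though it does not cover the
 * whole requested range.
 */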

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
                struct pnfs_layout_range *range)
{
        struct pnfs_layout_segment *lseg, *ret = NULL;

        dprintk("%s:Begin\n", __func__);

        list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
                if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
                    pnfs_lseg_range_match(&lseg->pls_range, range)) {
                        ret = pnfs_get_lseg(lseg);
                        break;
                }
                if (lseg->pls_range.offset > range->offset)
                        break;
        }

        dprintk("%s:Return lseg %p ref %d\n",
                __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
        return ret;
}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server.  If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
                                     struct inode *ino, int iomode)
{
        struct nfs4_threshold *t = ctx->mdsthreshold;
        struct nfs_inode *nfsi = NFS_I(ino);
        loff_t fsize = i_size_read(ino);
        bool size = false, size_set = false, io = false, io_set = false, ret = false;

        if (t == NULL)
                return ret;

        dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
                __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

        switch (iomode) {
        case IOMODE_READ:
                if (t->bm & THRESHOLD_RD) {
                        dprintk("%s fsize %llu\n", __func__, fsize);
                        size_set = true;
                        if (fsize < t->rd_sz)
                                size = true;
                }
                if (t->bm & THRESHOLD_RD_IO) {
                        dprintk("%s nfsi->read_io %llu\n", __func__,
                                nfsi->read_io);
                        io_set = true;
                        if (nfsi->read_io < t->rd_io_sz)
                                io = true;
                }
                break;
        case IOMODE_RW:
                if (t->bm & THRESHOLD_WR) {
                        dprintk("%s fsize %llu\n", __func__, fsize);
                        size_set = true;
                        if (fsize < t->wr_sz)
                                size = true;
                }
                if (t->bm & THRESHOLD_WR_IO) {
                        dprintk("%s nfsi->write_io %llu\n", __func__,
                                nfsi->write_io);
                        io_set = true;
                        if (nfsi->write_io < t->wr_io_sz)
                                io = true;
                }
                break;
        }
        if (size_set && io_set) {
                if (size && io)
                        ret = true;
        } else if (size || io)
                ret = true;

        dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
        return ret;
}
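
/*
 * Worked example (illustrative): with THRESHOLD_RD set, t->rd_sz = 1 MB
 * and a 64 KB file, fsize < t->rd_sz, so this returns true and the READ
 * is sent to the MDS instead of going through pNFS.
 */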

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
                   struct nfs_open_context *ctx,
                   loff_t pos,
                   u64 count,
                   enum pnfs_iomode iomode,
                   gfp_t gfp_flags)
{
        struct pnfs_layout_range arg = {
                .iomode = iomode,
                .offset = pos,
                .length = count,
        };
        unsigned pg_offset;
        struct nfs_server *server = NFS_SERVER(ino);
        struct nfs_client *clp = server->nfs_client;
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg = NULL;
        bool first;

        if (!pnfs_enabled_sb(NFS_SERVER(ino)))
                goto out;

        if (pnfs_within_mdsthreshold(ctx, ino, iomode))
                goto out;

        spin_lock(&ino->i_lock);
        lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
        if (lo == NULL) {
                spin_unlock(&ino->i_lock);
                goto out;
        }

        /* Do we even need to bother with this? */
        if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
                dprintk("%s matches recall, use MDS\n", __func__);
                goto out_unlock;
        }

        /* if LAYOUTGET already failed once we don't try again */
        if (pnfs_layout_io_test_failed(lo, iomode))
                goto out_unlock;

        /* Check to see if the layout for the given range already exists */
        lseg = pnfs_find_lseg(lo, &arg);
        if (lseg)
                goto out_unlock;

        if (pnfs_layoutgets_blocked(lo, 0))
                goto out_unlock;
        atomic_inc(&lo->plh_outstanding);

        first = list_empty(&lo->plh_layouts) ? true : false;
        spin_unlock(&ino->i_lock);

        if (first) {
                /* The lo must be on the clp list if there is any
                 * chance of a CB_LAYOUTRECALL(FILE) coming in.
                 */
                spin_lock(&clp->cl_lock);
                list_add_tail(&lo->plh_layouts, &server->layouts);
                spin_unlock(&clp->cl_lock);
        }

        pg_offset = arg.offset & ~PAGE_CACHE_MASK;
        if (pg_offset) {
                arg.offset -= pg_offset;
                arg.length += pg_offset;
        }
        if (arg.length != NFS4_MAX_UINT64)
                arg.length = PAGE_CACHE_ALIGN(arg.length);

        lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
        atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
        pnfs_put_layout_hdr(lo);
out:
        dprintk("%s: inode %s/%llu pNFS layout segment %s for "
                        "(%s, offset: %llu, length: %llu)\n",
                        __func__, ino->i_sb->s_id,
                        (unsigned long long)NFS_FILEID(ino),
                        lseg == NULL ? "not found" : "found",
                        iomode == IOMODE_RW ? "read/write" : "read-only",
                        (unsigned long long)pos,
                        (unsigned long long)count);
        return lseg;
out_unlock:
        spin_unlock(&ino->i_lock);
        goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);

struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
        struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
        struct nfs4_layoutget_res *res = &lgp->res;
        struct pnfs_layout_segment *lseg;
        struct inode *ino = lo->plh_inode;
        LIST_HEAD(free_me);
        int status = 0;

        /* Inject layout blob into I/O device driver */
        lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
        if (!lseg || IS_ERR(lseg)) {
                if (!lseg)
                        status = -ENOMEM;
                else
                        status = PTR_ERR(lseg);
                dprintk("%s: Could not allocate layout: error %d\n",
                       __func__, status);
                goto out;
        }

        init_lseg(lo, lseg);
        lseg->pls_range = res->range;

        spin_lock(&ino->i_lock);
        if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
                dprintk("%s forget reply due to recall\n", __func__);
                goto out_forget_reply;
        }

        if (pnfs_layoutgets_blocked(lo, 1)) {
                dprintk("%s forget reply due to state\n", __func__);
                goto out_forget_reply;
        }

        if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
                /* existing state ID, make sure the sequence number matches. */
                if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
                        dprintk("%s forget reply due to sequence\n", __func__);
                        goto out_forget_reply;
                }
                pnfs_set_layout_stateid(lo, &res->stateid, false);
        } else {
                /*
                 * We got an entirely new state ID.  Mark all segments for the
                 * inode invalid, and don't bother validating the stateid
                 * sequence number.
                 */
                pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL);

                nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
                lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
        }

        pnfs_get_lseg(lseg);
        pnfs_layout_insert_lseg(lo, lseg);

        if (res->return_on_close) {
                set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
                set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
        }

        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&free_me);
        return lseg;
out:
        return ERR_PTR(status);

out_forget_reply:
        spin_unlock(&ino->i_lock);
        lseg->pls_layout = lo;
        NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
        goto out;
}

void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
        u64 rd_size = req->wb_bytes;

        WARN_ON_ONCE(pgio->pg_lseg != NULL);

        if (pgio->pg_dreq == NULL)
                rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
        else
                rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                           req->wb_context,
                                           req_offset(req),
                                           rd_size,
                                           IOMODE_READ,
                                           GFP_KERNEL);
        /* If no lseg, fall back to read through mds */
        if (pgio->pg_lseg == NULL)
                nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
                           struct nfs_page *req, u64 wb_size)
{
        WARN_ON_ONCE(pgio->pg_lseg != NULL);

        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                           req->wb_context,
                                           req_offset(req),
                                           wb_size,
                                           IOMODE_RW,
                                           GFP_NOFS);
        /* If no lseg, fall back to write through mds */
        if (pgio->pg_lseg == NULL)
                nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
                     struct nfs_page *req)
{
        unsigned int size;
        u64 seg_end, req_start, seg_left;

        size = nfs_generic_pg_test(pgio, prev, req);
        if (!size)
                return 0;

        /*
         * 'size' contains the number of bytes left in the current page (up
         * to the original size asked for in @req->wb_bytes).
         *
         * Calculate how many bytes are left in the layout segment and
         * if there are fewer bytes than 'size', return that instead.
         *
         * Please also note that 'end_offset' is actually the offset of the
         * first byte that lies outside the pnfs_layout_range. FIXME?
         */
        if (pgio->pg_lseg) {
                seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
                                     pgio->pg_lseg->pls_range.length);
                req_start = req_offset(req);
                WARN_ON_ONCE(req_start > seg_end);
                /* start of request is past the last byte of this segment */
                if (req_start >= seg_end)
                        return 0;

                /* adjust 'size' iff there are fewer bytes left in the
                 * segment than what nfs_generic_pg_test returned */
                seg_left = seg_end - req_start;
                if (seg_left < size)
                        size = (unsigned int)seg_left;
        }

        return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
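
/*
 * Worked example (illustrative): if the current lseg covers [0, 64K) and
 * @req starts at offset 60K, seg_left is 4K; even if nfs_generic_pg_test()
 * allowed 8K, only 4K may be coalesced before the segment boundary.
 */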
1492
1493 int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
1494 {
1495         struct nfs_pageio_descriptor pgio;
1496
1497         /* Resend all requests through the MDS */
1498         nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
1499                               hdr->completion_ops);
1500         return nfs_pageio_resend(&pgio, hdr);
1501 }
1502 EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
1503
1504 static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
1505 {
1506
1507         dprintk("pnfs write error = %d\n", hdr->pnfs_error);
1508         if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1509             PNFS_LAYOUTRET_ON_ERROR) {
1510                 pnfs_return_layout(hdr->inode);
1511         }
1512         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
1513                 hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
1514 }
1515
1516 /*
1517  * Called by non-RPC-based layout drivers.
1518  */
1519 void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
1520 {
1521         trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
1522         if (!hdr->pnfs_error) {
1523                 pnfs_set_layoutcommit(hdr);
1524                 hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
1525         } else
1526                 pnfs_ld_handle_write_error(hdr);
1527         hdr->mds_ops->rpc_release(hdr);
1528 }
1529 EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
1530
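/*
 * Sketch of a non-RPC layout driver completion callback (hypothetical
 * "example_" helper, modeled on the block/object layout drivers): the
 * driver records the outcome in the header and hands it back to the
 * generic layer, which either completes the write normally or, via
 * hdr->pnfs_error, takes the pnfs_ld_handle_write_error() path above.
 */
static void example_write_io_done(struct nfs_pgio_header *hdr, int status)
{
	if (unlikely(status < 0))
		hdr->pnfs_error = status;	/* forces a resend through the MDS */
	pnfs_ld_write_done(hdr);
}
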
1531 static void
1532 pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
1533                 struct nfs_pgio_header *hdr)
1534 {
1535         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1536                 list_splice_tail_init(&hdr->pages, &desc->pg_list);
1537                 nfs_pageio_reset_write_mds(desc);
1538                 desc->pg_recoalesce = 1;
1539         }
1540         nfs_pgio_data_destroy(hdr);
1541 }
1542
1543 static enum pnfs_try_status
1544 pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
1545                         const struct rpc_call_ops *call_ops,
1546                         struct pnfs_layout_segment *lseg,
1547                         int how)
1548 {
1549         struct inode *inode = hdr->inode;
1550         enum pnfs_try_status trypnfs;
1551         struct nfs_server *nfss = NFS_SERVER(inode);
1552
1553         hdr->mds_ops = call_ops;
1554
1555         dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
1556                 inode->i_ino, hdr->args.count, hdr->args.offset, how);
1557         trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
1558         if (trypnfs != PNFS_NOT_ATTEMPTED)
1559                 nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
1560         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1561         return trypnfs;
1562 }
1563
1564 static void
1565 pnfs_do_write(struct nfs_pageio_descriptor *desc,
1566               struct nfs_pgio_header *hdr, int how)
1567 {
1568         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
1569         struct pnfs_layout_segment *lseg = desc->pg_lseg;
1570         enum pnfs_try_status trypnfs;
1571
1572         desc->pg_lseg = NULL;
1573         trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
1574         if (trypnfs == PNFS_NOT_ATTEMPTED)
1575                 pnfs_write_through_mds(desc, hdr);
1576         pnfs_put_lseg(lseg);
1577 }
1578
1579 static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
1580 {
1581         pnfs_put_lseg(hdr->lseg);
1582         nfs_pgio_header_free(hdr);
1583 }
1584 EXPORT_SYMBOL_GPL(pnfs_writehdr_free);
1585
1586 int
1587 pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1588 {
1589         struct nfs_pgio_header *hdr;
1590         int ret;
1591
1592         hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
1593         if (!hdr) {
1594                 desc->pg_completion_ops->error_cleanup(&desc->pg_list);
1595                 pnfs_put_lseg(desc->pg_lseg);
1596                 desc->pg_lseg = NULL;
1597                 return -ENOMEM;
1598         }
1599         nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
1600         hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
1601         ret = nfs_generic_pgio(desc, hdr);
1602         if (ret != 0) {
1603                 pnfs_put_lseg(desc->pg_lseg);
1604                 desc->pg_lseg = NULL;
1605         } else
1606                 pnfs_do_write(desc, hdr, desc->pg_ioflags);
1607         return ret;
1608 }
1609 EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
1610
1611 int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
1612 {
1613         struct nfs_pageio_descriptor pgio;
1614
1615         /* Resend all requests through the MDS */
1616         nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
1617         return nfs_pageio_resend(&pgio, hdr);
1618 }
1619 EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
1620
1621 static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
1622 {
1623         dprintk("pnfs read error = %d\n", hdr->pnfs_error);
1624         if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1625             PNFS_LAYOUTRET_ON_ERROR) {
1626                 pnfs_return_layout(hdr->inode);
1627         }
1628         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
1629                 hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
1630 }
1631
1632 /*
1633  * Called by non-RPC-based layout drivers.
1634  */
1635 void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
1636 {
1637         trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
1638         if (likely(!hdr->pnfs_error)) {
1639                 __nfs4_read_done_cb(hdr);
1640                 hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
1641         } else
1642                 pnfs_ld_handle_read_error(hdr);
1643         hdr->mds_ops->rpc_release(hdr);
1644 }
1645 EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
1646
1647 static void
1648 pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
1649                 struct nfs_pgio_header *hdr)
1650 {
1651         if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1652                 list_splice_tail_init(&hdr->pages, &desc->pg_list);
1653                 nfs_pageio_reset_read_mds(desc);
1654                 desc->pg_recoalesce = 1;
1655         }
1656         nfs_pgio_data_destroy(hdr);
1657 }
1658
1659 /*
1660  * Call the appropriate parallel I/O subsystem read function.
1661  */
1662 static enum pnfs_try_status
1663 pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
1664                        const struct rpc_call_ops *call_ops,
1665                        struct pnfs_layout_segment *lseg)
1666 {
1667         struct inode *inode = hdr->inode;
1668         struct nfs_server *nfss = NFS_SERVER(inode);
1669         enum pnfs_try_status trypnfs;
1670
1671         hdr->mds_ops = call_ops;
1672
1673         dprintk("%s: Reading ino:%lu %u@%llu\n",
1674                 __func__, inode->i_ino, hdr->args.count, hdr->args.offset);
1675
1676         trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
1677         if (trypnfs != PNFS_NOT_ATTEMPTED)
1678                 nfs_inc_stats(inode, NFSIOS_PNFS_READ);
1679         dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1680         return trypnfs;
1681 }
1682
1683 static void
1684 pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
1685 {
1686         const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
1687         struct pnfs_layout_segment *lseg = desc->pg_lseg;
1688         enum pnfs_try_status trypnfs;
1689
1690         desc->pg_lseg = NULL;
1691         trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
1692         if (trypnfs == PNFS_NOT_ATTEMPTED)
1693                 pnfs_read_through_mds(desc, hdr);
1694         pnfs_put_lseg(lseg);
1695 }
1696
1697 static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
1698 {
1699         pnfs_put_lseg(hdr->lseg);
1700         nfs_pgio_header_free(hdr);
1701 }
1702 EXPORT_SYMBOL_GPL(pnfs_readhdr_free);
1703
1704 int
1705 pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
1706 {
1707         struct nfs_pgio_header *hdr;
1708         int ret;
1709
1710         hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
1711         if (!hdr) {
1712                 desc->pg_completion_ops->error_cleanup(&desc->pg_list);
1713                 pnfs_put_lseg(desc->pg_lseg);
1714                 desc->pg_lseg = NULL;
1715                 return -ENOMEM;
1717         }
1718         nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
1719         hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
1720         ret = nfs_generic_pgio(desc, hdr);
1721         if (ret != 0) {
1722                 pnfs_put_lseg(desc->pg_lseg);
1723                 desc->pg_lseg = NULL;
1724         } else
1725                 pnfs_do_read(desc, hdr);
1726         return ret;
1727 }
1728 EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
1729
1730 static void pnfs_clear_layoutcommitting(struct inode *inode)
1731 {
1732         unsigned long *bitlock = &NFS_I(inode)->flags;
1733
1734         clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
1735         smp_mb__after_atomic();
1736         wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
1737 }
1738
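/*
 * Note: this wake-up pairs with the test_and_set_bit() /
 * wait_on_bit_lock_action() sequence in pnfs_layoutcommit_inode()
 * below.  clear_bit_unlock() releases the bitlock, and
 * smp_mb__after_atomic() orders the clear before the waitqueue check
 * performed by wake_up_bit().
 */
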
1739 /*
1740  * There can be multiple RW segments.
1741  */
1742 static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
1743 {
1744         struct pnfs_layout_segment *lseg;
1745
1746         list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
1747                 if (lseg->pls_range.iomode == IOMODE_RW &&
1748                     test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
1749                         list_add(&lseg->pls_lc_list, listp);
1750         }
1751 }
1752
1753 static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
1754 {
1755         struct pnfs_layout_segment *lseg, *tmp;
1756
1757         /* Matched by references in pnfs_set_layoutcommit */
1758         list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
1759                 list_del_init(&lseg->pls_lc_list);
1760                 pnfs_put_lseg(lseg);
1761         }
1762
1763         pnfs_clear_layoutcommitting(inode);
1764 }
1765
1766 void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
1767 {
1768         pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
1769 }
1770 EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
1771
1772 void
1773 pnfs_set_layoutcommit(struct nfs_pgio_header *hdr)
1774 {
1775         struct inode *inode = hdr->inode;
1776         struct nfs_inode *nfsi = NFS_I(inode);
1777         loff_t end_pos = hdr->mds_offset + hdr->res.count;
1778         bool mark_as_dirty = false;
1779
1780         spin_lock(&inode->i_lock);
1781         if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
1782                 mark_as_dirty = true;
1783                 dprintk("%s: Set layoutcommit for inode %lu ",
1784                         __func__, inode->i_ino);
1785         }
1786         if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
1787                 /* references matched in nfs4_layoutcommit_release */
1788                 pnfs_get_lseg(hdr->lseg);
1789         }
1790         if (end_pos > nfsi->layout->plh_lwb)
1791                 nfsi->layout->plh_lwb = end_pos;
1792         spin_unlock(&inode->i_lock);
1793         dprintk("%s: lseg %p end_pos %llu\n",
1794                 __func__, hdr->lseg, nfsi->layout->plh_lwb);
1795
1796         /* If pnfs_layoutcommit_inode() runs between the unlock above and the
1797          * mark below, the next one is a no-op: NFS_INO_LAYOUTCOMMIT is clear */
1798         if (mark_as_dirty)
1799                 mark_inode_dirty_sync(inode);
1800 }
1801 EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
1802
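/*
 * Worked example (hypothetical numbers): a write of res.count == 4096
 * at mds_offset == 8192 yields end_pos == 12288.  plh_lwb thus tracks
 * the byte just past the highest write seen through the layout; on
 * LAYOUTCOMMIT, pnfs_layoutcommit_inode() reports it to the MDS as
 * args.lastbytewritten == plh_lwb - 1.
 */
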
1803 void pnfs_commit_set_layoutcommit(struct nfs_commit_data *data)
1804 {
1805         struct inode *inode = data->inode;
1806         struct nfs_inode *nfsi = NFS_I(inode);
1807         bool mark_as_dirty = false;
1808
1809         spin_lock(&inode->i_lock);
1810         if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
1811                 mark_as_dirty = true;
1812                 dprintk("%s: Set layoutcommit for inode %lu ",
1813                         __func__, inode->i_ino);
1814         }
1815         if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &data->lseg->pls_flags)) {
1816                 /* references matched in nfs4_layoutcommit_release */
1817                 pnfs_get_lseg(data->lseg);
1818         }
1819         if (data->lwb > nfsi->layout->plh_lwb)
1820                 nfsi->layout->plh_lwb = data->lwb;
1821         spin_unlock(&inode->i_lock);
1822         dprintk("%s: lseg %p end_pos %llu\n",
1823                 __func__, data->lseg, nfsi->layout->plh_lwb);
1824
1825         /* If pnfs_layoutcommit_inode() runs between the unlock above and the
1826          * mark below, the next one is a no-op: NFS_INO_LAYOUTCOMMIT is clear */
1827         if (mark_as_dirty)
1828                 mark_inode_dirty_sync(inode);
1829 }
1830 EXPORT_SYMBOL_GPL(pnfs_commit_set_layoutcommit);
1831
1832 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
1833 {
1834         struct nfs_server *nfss = NFS_SERVER(data->args.inode);
1835
1836         if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
1837                 nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
1838         pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
1839 }
1840
1841 /*
1842  * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
1843  * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
1844  * data to disk to allow the server to recover the data if it crashes.
1845  * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
1846  * is off and either a COMMIT is sent to a data server or WRITEs to a
1847  * data server return NFS_DATA_SYNC.
1848  */
1849 int
1850 pnfs_layoutcommit_inode(struct inode *inode, bool sync)
1851 {
1852         struct nfs4_layoutcommit_data *data;
1853         struct nfs_inode *nfsi = NFS_I(inode);
1854         loff_t end_pos;
1855         int status;
1856
1857         if (!pnfs_layoutcommit_outstanding(inode))
1858                 return 0;
1859
1860         dprintk("--> %s inode %lu\n", __func__, inode->i_ino);
1861
1862         status = -EAGAIN;
1863         if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
1864                 if (!sync)
1865                         goto out;
1866                 status = wait_on_bit_lock_action(&nfsi->flags,
1867                                 NFS_INO_LAYOUTCOMMITTING,
1868                                 nfs_wait_bit_killable,
1869                                 TASK_KILLABLE);
1870                 if (status)
1871                         goto out;
1872         }
1873
1874         status = -ENOMEM;
1875         /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
1876         data = kzalloc(sizeof(*data), GFP_NOFS);
1877         if (!data)
1878                 goto clear_layoutcommitting;
1879
1880         status = 0;
1881         spin_lock(&inode->i_lock);
1882         if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
1883                 goto out_unlock;
1884
1885         INIT_LIST_HEAD(&data->lseg_list);
1886         pnfs_list_write_lseg(inode, &data->lseg_list);
1887
1888         end_pos = nfsi->layout->plh_lwb;
1889         nfsi->layout->plh_lwb = 0;
1890
1891         nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
1892         spin_unlock(&inode->i_lock);
1893
1894         data->args.inode = inode;
1895         data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
1896         nfs_fattr_init(&data->fattr);
1897         data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
1898         data->res.fattr = &data->fattr;
1899         data->args.lastbytewritten = end_pos - 1;
1900         data->res.server = NFS_SERVER(inode);
1901
1902         status = nfs4_proc_layoutcommit(data, sync);
1903 out:
1904         if (status)
1905                 mark_inode_dirty_sync(inode);
1906         dprintk("<-- %s status %d\n", __func__, status);
1907         return status;
1908 out_unlock:
1909         spin_unlock(&inode->i_lock);
1910         kfree(data);
1911 clear_layoutcommitting:
1912         pnfs_clear_layoutcommitting(inode);
1913         goto out;
1914 }
1915
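/*
 * Sketch of a typical caller (simplified; modeled on the NFSv4
 * ->write_inode() path): LAYOUTCOMMIT is driven from inode writeback
 * and is only waited on when the writeback itself is synchronous.
 */
static int example_write_inode(struct inode *inode,
			       struct writeback_control *wbc)
{
	if (pnfs_layoutcommit_outstanding(inode))
		return pnfs_layoutcommit_inode(inode,
					       wbc->sync_mode == WB_SYNC_ALL);
	return 0;
}
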
1916 struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
1917 {
1918         struct nfs4_threshold *thp;
1919
1920         thp = kzalloc(sizeof(*thp), GFP_NOFS);
1921         if (!thp) {
1922                 dprintk("%s mdsthreshold allocation failed\n", __func__);
1923                 return NULL;
1924         }
1925         return thp;
1926 }