/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.  if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose.  the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>          /* struct bio */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

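/*
 * An extent counts as a hole if it has no data on the volume: either it
 * is explicitly unmapped (PNFS_BLOCK_NONE_DATA), or it is INVALID_DATA
 * that has not been written yet (be_tag not set).  Reads from such
 * extents are satisfied by zero-filling instead of touching the device.
 */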
static bool is_hole(struct pnfs_block_extent *be)
{
        switch (be->be_state) {
        case PNFS_BLOCK_NONE_DATA:
                return true;
        case PNFS_BLOCK_INVALID_DATA:
                return be->be_tag ? false : true;
        default:
                return false;
        }
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
        struct kref refcnt;
        void (*pnfs_callback) (void *data);
        void *data;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
        struct parallel_io *rv;

        rv = kmalloc(sizeof(*rv), GFP_NOFS);
        if (rv) {
                rv->data = data;
                kref_init(&rv->refcnt);
        }
        return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
        kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
        struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

        dprintk("%s enter\n", __func__);
        p->pnfs_callback(p->data);
        kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
        kref_put(&p->refcnt, destroy_parallel);
}

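/*
 * Submit a fully built bio, taking an extra reference on the
 * parallel_io tracker for the in-flight bio.  Always returns NULL so
 * the caller can simply reset its bio pointer.
 */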
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
        if (bio) {
                get_parallel(bio->bi_private);
                dprintk("%s submitting %s bio %u@%llu\n", __func__,
                        rw == READ ? "read" : "write", bio->bi_iter.bi_size,
                        (unsigned long long)bio->bi_iter.bi_sector);
                submit_bio(rw, bio);
        }
        return NULL;
}

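/*
 * Allocate a bio for up to @npg pages and point it at the place on the
 * volume that backs @isect, translating the file-relative offset
 * (be_f_offset) into the volume-relative one (be_v_offset).
 */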
static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
                                     struct pnfs_block_extent *be,
                                     void (*end_io)(struct bio *, int err),
                                     struct parallel_io *par)
{
        struct pnfs_block_dev *dev =
                container_of(be->be_device, struct pnfs_block_dev, d_node);
        struct bio *bio;

        npg = min(npg, BIO_MAX_PAGES);
        bio = bio_alloc(GFP_NOIO, npg);
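        /*
         * If we are called from a memory-reclaim context, keep retrying
         * with ever smaller bios rather than failing the allocation
         * outright.
         */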
        if (!bio && (current->flags & PF_MEMALLOC)) {
                while (!bio && (npg /= 2))
                        bio = bio_alloc(GFP_NOIO, npg);
        }

        if (bio) {
                bio->bi_iter.bi_sector = isect - be->be_f_offset +
                        be->be_v_offset;
                bio->bi_bdev = dev->d_bdev;
                bio->bi_end_io = end_io;
                bio->bi_private = par;
        }
        return bio;
}

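/*
 * Add a page to the current bio, submitting the bio and starting a new
 * one whenever bio_add_page() cannot take the whole range.  Completed
 * pages go through @end_io; @par tracks when the last bio of the
 * request has finished.
 */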
static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
                                      sector_t isect, struct page *page,
                                      struct pnfs_block_extent *be,
                                      void (*end_io)(struct bio *, int err),
                                      struct parallel_io *par,
                                      unsigned int offset, int len)
{
        isect = isect + (offset >> SECTOR_SHIFT);
        dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
                npg, rw, (unsigned long long)isect, offset, len);
retry:
        if (!bio) {
                bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
                if (!bio)
                        return ERR_PTR(-ENOMEM);
        }
        if (bio_add_page(bio, page, len, offset) < len) {
                bio = bl_submit_bio(rw, bio);
                goto retry;
        }
        return bio;
}

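/*
 * Per-bio read completion: record the first error in the pNFS header
 * and mark the layout segment failed, so the request is redone through
 * the MDS rather than this layout.
 */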
static void bl_end_io_read(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;

        if (err) {
                struct nfs_pgio_header *header = par->data;

                if (!header->pnfs_error)
                        header->pnfs_error = -EIO;
                pnfs_set_lo_fail(header->lseg);
        }

        bio_put(bio);
        put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
        struct rpc_task *task;
        struct nfs_pgio_header *hdr;

        dprintk("%s enter\n", __func__);
        task = container_of(work, struct rpc_task, u.tk_work);
        hdr = container_of(task, struct nfs_pgio_header, task);
        pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data)
{
        struct nfs_pgio_header *hdr = data;

        hdr->task.tk_status = hdr->pnfs_error;
        INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
        schedule_work(&hdr->task.u.tk_work);
}

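/*
 * Read path: walk the pages of the request, looking up the block
 * extent backing each one.  Pages over holes are zero-filled without
 * touching the device; everything else is batched into bios, which are
 * submitted whenever the current extent is used up.
 */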
static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *header)
{
        struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
        struct bio *bio = NULL;
        struct pnfs_block_extent be;
        sector_t isect, extent_length = 0;
        struct parallel_io *par;
        loff_t f_offset = header->args.offset;
        size_t bytes_left = header->args.count;
        unsigned int pg_offset, pg_len;
        struct page **pages = header->args.pages;
        int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
        const bool is_dio = (header->dreq != NULL);
        struct blk_plug plug;
        int i;

        dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
                header->page_array.npages, f_offset,
                (unsigned int)header->args.count);

        par = alloc_parallel(header);
        if (!par)
                return PNFS_NOT_ATTEMPTED;
        par->pnfs_callback = bl_end_par_io_read;

        blk_start_plug(&plug);

        isect = (sector_t) (f_offset >> SECTOR_SHIFT);
        /* Code assumes extents are page-aligned */
        for (i = pg_index; i < header->page_array.npages; i++) {
                if (extent_length <= 0) {
                        /* We've used up the previous extent */
                        bio = bl_submit_bio(READ, bio);

                        /* Get the next one */
                        if (!ext_tree_lookup(bl, isect, &be, false)) {
                                header->pnfs_error = -EIO;
                                goto out;
                        }
                        extent_length = be.be_length - (isect - be.be_f_offset);
                }

                pg_offset = f_offset & ~PAGE_CACHE_MASK;
                if (is_dio) {
                        if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
                                pg_len = PAGE_CACHE_SIZE - pg_offset;
                        else
                                pg_len = bytes_left;

                        f_offset += pg_len;
                        bytes_left -= pg_len;
                        isect += (pg_offset >> SECTOR_SHIFT);
                        extent_length -= (pg_offset >> SECTOR_SHIFT);
                } else {
                        BUG_ON(pg_offset != 0);
                        pg_len = PAGE_CACHE_SIZE;
                }

                if (is_hole(&be)) {
                        bio = bl_submit_bio(READ, bio);
                        /* Fill hole w/ zeroes w/o accessing device */
                        dprintk("%s Zeroing page for hole\n", __func__);
                        zero_user_segment(pages[i], pg_offset, pg_len);
                } else {
                        bio = do_add_page_to_bio(bio,
                                                 header->page_array.npages - i,
                                                 READ,
                                                 isect, pages[i], &be,
                                                 bl_end_io_read, par,
                                                 pg_offset, pg_len);
                        if (IS_ERR(bio)) {
                                header->pnfs_error = PTR_ERR(bio);
                                bio = NULL;
                                goto out;
                        }
                }
                isect += (pg_len >> SECTOR_SHIFT);
                extent_length -= (pg_len >> SECTOR_SHIFT);
        }
        if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
                header->res.eof = 1;
                header->res.count = header->inode->i_size - header->args.offset;
        } else {
                header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
        }
out:
        bl_submit_bio(READ, bio);
        blk_finish_plug(&plug);
        put_parallel(par);
        return PNFS_ATTEMPTED;
}

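/*
 * Per-bio write completion counterpart of bl_end_io_read(): flag the
 * first error in the pNFS header, fail the layout segment and drop the
 * bio's reference on the parallel_io tracker.
 */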
static void bl_end_io_write(struct bio *bio, int err)
{
        struct parallel_io *par = bio->bi_private;
        const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
        struct nfs_pgio_header *header = par->data;

        if (!uptodate) {
                if (!header->pnfs_error)
                        header->pnfs_error = -EIO;
                pnfs_set_lo_fail(header->lseg);
        }
        bio_put(bio);
        put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
        struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
        struct nfs_pgio_header *hdr =
                        container_of(task, struct nfs_pgio_header, task);

        dprintk("%s enter\n", __func__);

        if (likely(!hdr->pnfs_error)) {
                struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
                u64 start = hdr->args.offset & (loff_t)PAGE_CACHE_MASK;
                u64 end = (hdr->args.offset + hdr->args.count +
                        PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK;

                ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
                                        (end - start) >> SECTOR_SHIFT);
        }

        pnfs_ld_write_done(hdr);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data)
{
        struct nfs_pgio_header *hdr = data;

        hdr->task.tk_status = hdr->pnfs_error;
        hdr->verf.committed = NFS_FILE_SYNC;
        INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
        schedule_work(&hdr->task.u.tk_work);
}

static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
        struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
        struct bio *bio = NULL;
        struct pnfs_block_extent be;
        sector_t isect, extent_length = 0;
        struct parallel_io *par = NULL;
        loff_t offset = header->args.offset;
        size_t count = header->args.count;
        struct page **pages = header->args.pages;
        int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
        struct blk_plug plug;
        int i;

        dprintk("%s enter, %zu@%lld\n", __func__, count, offset);

        /* At this point, header->page_array is a (sequential) list of nfs_pages.
         * We want to write each, and if there is an error set pnfs_error
         * so that the request is redone through regular NFS.
         */
        par = alloc_parallel(header);
        if (!par)
                return PNFS_NOT_ATTEMPTED;
        par->pnfs_callback = bl_end_par_io_write;

        blk_start_plug(&plug);

        /* we always write out the whole page */
        offset = offset & (loff_t)PAGE_CACHE_MASK;
        isect = offset >> SECTOR_SHIFT;

        for (i = pg_index; i < header->page_array.npages; i++) {
                if (extent_length <= 0) {
                        /* We've used up the previous extent */
                        bio = bl_submit_bio(WRITE, bio);
                        /* Get the next one */
                        if (!ext_tree_lookup(bl, isect, &be, true)) {
                                header->pnfs_error = -EINVAL;
                                goto out;
                        }

                        extent_length = be.be_length - (isect - be.be_f_offset);
                }

                bio = do_add_page_to_bio(bio, header->page_array.npages - i,
                                         WRITE, isect, pages[i], &be,
                                         bl_end_io_write, par,
                                         0, PAGE_CACHE_SIZE);
                if (IS_ERR(bio)) {
                        header->pnfs_error = PTR_ERR(bio);
                        bio = NULL;
                        goto out;
                }
                offset += PAGE_CACHE_SIZE;
                count -= PAGE_CACHE_SIZE;
                isect += PAGE_CACHE_SECTORS;
                extent_length -= PAGE_CACHE_SECTORS;
        }

        header->res.count = header->args.count;
out:
        bl_submit_bio(WRITE, bio);
        blk_finish_plug(&plug);
        put_parallel(par);
        return PNFS_ATTEMPTED;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
        int err;

        dprintk("%s enter\n", __func__);

        err = ext_tree_remove(bl, true, 0, LLONG_MAX);
        WARN_ON(err);

        kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
                                                   gfp_t gfp_flags)
{
        struct pnfs_block_layout *bl;

        dprintk("%s enter\n", __func__);
        bl = kzalloc(sizeof(*bl), gfp_flags);
        if (!bl)
                return NULL;

        bl->bl_ext_rw = RB_ROOT;
        bl->bl_ext_ro = RB_ROOT;
        spin_lock_init(&bl->bl_ext_lock);

        return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
        dprintk("%s enter\n", __func__);
        kfree(lseg);
}

/* Tracks info needed to ensure extents in layout obey constraints of spec */
struct layout_verification {
        u32 mode;       /* R or RW */
        u64 start;      /* Expected start of next non-COW extent */
        u64 inval;      /* Start of INVAL coverage */
        u64 cowread;    /* End of COW read coverage */
};

/* Verify the extent meets the layout requirements of the pnfs-block draft,
 * section 2.3.1.
 */
static int verify_extent(struct pnfs_block_extent *be,
                         struct layout_verification *lv)
{
        if (lv->mode == IOMODE_READ) {
                if (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
                    be->be_state == PNFS_BLOCK_INVALID_DATA)
                        return -EIO;
                if (be->be_f_offset != lv->start)
                        return -EIO;
                lv->start += be->be_length;
                return 0;
        }
        /* lv->mode == IOMODE_RW */
        if (be->be_state == PNFS_BLOCK_READWRITE_DATA) {
                if (be->be_f_offset != lv->start)
                        return -EIO;
                if (lv->cowread > lv->start)
                        return -EIO;
                lv->start += be->be_length;
                lv->inval = lv->start;
                return 0;
        } else if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                if (be->be_f_offset != lv->start)
                        return -EIO;
                lv->start += be->be_length;
                return 0;
        } else if (be->be_state == PNFS_BLOCK_READ_DATA) {
                if (be->be_f_offset > lv->start)
                        return -EIO;
                if (be->be_f_offset < lv->inval)
                        return -EIO;
                if (be->be_f_offset < lv->cowread)
                        return -EIO;
                /* It looks like you might want to min this with lv->start,
                 * but you really don't.
                 */
                lv->inval = lv->inval + be->be_length;
                lv->cowread = be->be_f_offset + be->be_length;
                return 0;
        } else
                return -EIO;
}

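/*
 * Decode a 64-bit byte value from the XDR stream and convert it to a
 * count of 512-byte sectors.  The protocol requires these values to be
 * sector aligned, so reject anything with the low nine bits set.
 */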
static int decode_sector_number(__be32 **rp, sector_t *sp)
{
        uint64_t s;

        *rp = xdr_decode_hyper(*rp, &s);
        if (s & 0x1ff) {
                printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__);
                return -1;
        }
        *sp = s >> SECTOR_SHIFT;
        return 0;
}

/* XDR decode pnfs_block_layout4 structure */
static int
nfs4_blk_process_layoutget(struct pnfs_layout_hdr *lo,
                           struct nfs4_layoutget_res *lgr, gfp_t gfp_flags)
{
        struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
        int i, status = -EIO;
        uint32_t count;
        struct pnfs_block_extent *be = NULL, *save;
        struct xdr_stream stream;
        struct xdr_buf buf;
        struct page *scratch;
        __be32 *p;
        struct layout_verification lv = {
                .mode = lgr->range.iomode,
                .start = lgr->range.offset >> SECTOR_SHIFT,
                .inval = lgr->range.offset >> SECTOR_SHIFT,
                .cowread = lgr->range.offset >> SECTOR_SHIFT,
        };
        LIST_HEAD(extents);

        dprintk("---> %s\n", __func__);

        scratch = alloc_page(gfp_flags);
        if (!scratch)
                return -ENOMEM;

        xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len);
        xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

        p = xdr_inline_decode(&stream, 4);
        if (unlikely(!p))
                goto out_err;

        count = be32_to_cpup(p++);

        dprintk("%s enter, number of extents %i\n", __func__, count);
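        /* On the wire each extent is a deviceid4 followed by three
         * 64-bit sector values and a 32-bit state word, i.e. a fixed
         * 28 + NFS4_DEVICEID4_SIZE bytes per extent.
         */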
        p = xdr_inline_decode(&stream, (28 + NFS4_DEVICEID4_SIZE) * count);
        if (unlikely(!p))
                goto out_err;

        /* Decode individual extents, putting them in temporary
         * staging area until whole layout is decoded to make error
         * recovery easier.
         */
        for (i = 0; i < count; i++) {
                struct nfs4_deviceid id;

                be = kzalloc(sizeof(struct pnfs_block_extent), GFP_NOFS);
                if (!be) {
                        status = -ENOMEM;
                        goto out_err;
                }
                memcpy(&id, p, NFS4_DEVICEID4_SIZE);
                p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);

                be->be_device =
                        nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
                                                lo->plh_lc_cred, gfp_flags);
                if (!be->be_device)
                        goto out_err;

                /* The next three values are read in as bytes,
                 * but stored as 512-byte sector lengths
                 */
                if (decode_sector_number(&p, &be->be_f_offset) < 0)
                        goto out_err;
                if (decode_sector_number(&p, &be->be_length) < 0)
                        goto out_err;
                if (decode_sector_number(&p, &be->be_v_offset) < 0)
                        goto out_err;
                be->be_state = be32_to_cpup(p++);
                if (verify_extent(be, &lv)) {
                        dprintk("%s verify failed\n", __func__);
                        goto out_err;
                }
                list_add_tail(&be->be_list, &extents);
        }
        if (lgr->range.offset + lgr->range.length !=
                        lv.start << SECTOR_SHIFT) {
                dprintk("%s Final length mismatch\n", __func__);
                be = NULL;
                goto out_err;
        }
        if (lv.start < lv.cowread) {
                dprintk("%s Final uncovered COW extent\n", __func__);
                be = NULL;
                goto out_err;
        }
        /* Extents decoded properly, now try to merge them in to
         * existing layout extents.
         */
        list_for_each_entry_safe(be, save, &extents, be_list) {
                list_del(&be->be_list);

                status = ext_tree_insert(bl, be);
                if (status)
                        goto out_free_list;
        }
        status = 0;
 out:
        __free_page(scratch);
        dprintk("%s returns %i\n", __func__, status);
        return status;

 out_err:
        /*
         * "be" may be NULL here (allocation failure, or the final
         * consistency checks) or may not have a device reference yet;
         * guard both before releasing it.
         */
        if (be) {
                if (be->be_device)
                        nfs4_put_deviceid_node(be->be_device);
                kfree(be);
        }
 out_free_list:
        while (!list_empty(&extents)) {
                be = list_first_entry(&extents, struct pnfs_block_extent,
                                      be_list);
                list_del(&be->be_list);
                nfs4_put_deviceid_node(be->be_device);
                kfree(be);
        }
        goto out;
}

/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
                                                 struct nfs4_layoutget_res *lgr,
                                                 gfp_t gfp_flags)
{
        struct pnfs_layout_segment *lseg;
        int status;

        dprintk("%s enter\n", __func__);
        lseg = kzalloc(sizeof(*lseg), gfp_flags);
        if (!lseg)
                return ERR_PTR(-ENOMEM);
        status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
        if (status) {
                /* We don't want to call the full-blown bl_free_lseg,
                 * since on error extents were not touched.
                 */
                kfree(lseg);
                return ERR_PTR(status);
        }
        return lseg;
}

static void
bl_return_range(struct pnfs_layout_hdr *lo,
                struct pnfs_layout_range *range)
{
        struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
        sector_t offset = range->offset >> SECTOR_SHIFT, end;
        int err;

        if (range->offset % 8) {
                dprintk("%s: offset %lld not block size aligned\n",
                        __func__, range->offset);
                return;
        }

        if (range->length != NFS4_MAX_UINT64) {
                if (range->length % 8) {
                        dprintk("%s: length %lld not block size aligned\n",
                                __func__, range->length);
                        return;
                }

                end = offset + (range->length >> SECTOR_SHIFT);
        } else {
                end = round_down(NFS4_MAX_UINT64, PAGE_SIZE);
        }

        err = ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end);
}

static int
bl_prepare_layoutcommit(struct nfs4_layoutcommit_args *arg)
{
        return ext_tree_prepare_commit(arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
        ext_tree_mark_committed(&lcdata->args, lcdata->res.status);
}

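/*
 * Mount-time check: this driver reads and writes whole pages, so it can
 * only serve mounts whose pNFS block size is no larger than PAGE_SIZE.
 */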
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
        dprintk("%s enter\n", __func__);

        if (server->pnfs_blksize == 0) {
                dprintk("%s Server did not return blksize\n", __func__);
                return -EINVAL;
        }
        if (server->pnfs_blksize > PAGE_SIZE) {
                printk(KERN_ERR "%s: pNFS blksize %d not supported.\n",
                        __func__, server->pnfs_blksize);
                return -EINVAL;
        }

        return 0;
}

static bool
is_aligned_req(struct nfs_pageio_descriptor *pgio,
                struct nfs_page *req, unsigned int alignment)
{
        /*
         * Always accept buffered writes, higher layers take care of the
         * right alignment.
         */
        if (pgio->pg_dreq == NULL)
                return true;

        if (!IS_ALIGNED(req->wb_offset, alignment))
                return false;

        if (IS_ALIGNED(req->wb_bytes, alignment))
                return true;

        if (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode)) {
                /*
                 * If the write goes up to the inode size, just write
                 * the full page.  Data past the inode size is
                 * guaranteed to be zeroed by the higher level client
                 * code, and this behaviour is mandated by RFC 5663
                 * section 2.3.2.
                 */
                return true;
        }

        return false;
}

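/*
 * Direct reads that are not sector aligned cannot be handled by the
 * block device, so send them through the MDS instead.
 */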
static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
        if (!is_aligned_req(pgio, req, SECTOR_SIZE)) {
                nfs_pageio_reset_read_mds(pgio);
                return;
        }

        pnfs_generic_pg_init_read(pgio, req);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
                struct nfs_page *req)
{
        if (!is_aligned_req(pgio, req, SECTOR_SIZE))
                return 0;
        return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
        struct address_space *mapping = inode->i_mapping;
        pgoff_t end;

        /* Optimize common case that writes from 0 to end of file */
        end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
        if (end != NFS_I(inode)->npages) {
                rcu_read_lock();
                end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
                rcu_read_unlock();
        }

        if (!end)
                return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
        else
                return (end - idx) << PAGE_CACHE_SHIFT;
}

static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
        u64 wb_size;

        if (!is_aligned_req(pgio, req, PAGE_SIZE)) {
                nfs_pageio_reset_write_mds(pgio);
                return;
        }

        if (pgio->pg_dreq == NULL)
                wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
                                              req->wb_index);
        else
                wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

        pnfs_generic_pg_init_write(pgio, req, wb_size);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
                 struct nfs_page *req)
{
        if (!is_aligned_req(pgio, req, PAGE_SIZE))
                return 0;
        return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
        .pg_init = bl_pg_init_read,
        .pg_test = bl_pg_test_read,
        .pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
        .pg_init = bl_pg_init_write,
        .pg_test = bl_pg_test_write,
        .pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
        .id                             = LAYOUT_BLOCK_VOLUME,
        .name                           = "LAYOUT_BLOCK_VOLUME",
        .owner                          = THIS_MODULE,
        .flags                          = PNFS_LAYOUTRET_ON_SETATTR |
                                          PNFS_READ_WHOLE_PAGE,
        .read_pagelist                  = bl_read_pagelist,
        .write_pagelist                 = bl_write_pagelist,
        .alloc_layout_hdr               = bl_alloc_layout_hdr,
        .free_layout_hdr                = bl_free_layout_hdr,
        .alloc_lseg                     = bl_alloc_lseg,
        .free_lseg                      = bl_free_lseg,
        .return_range                   = bl_return_range,
        .prepare_layoutcommit           = bl_prepare_layoutcommit,
        .cleanup_layoutcommit           = bl_cleanup_layoutcommit,
        .set_layoutdriver               = bl_set_layoutdriver,
        .alloc_deviceid_node            = bl_alloc_deviceid_node,
        .free_deviceid_node             = bl_free_deviceid_node,
        .pg_read_ops                    = &bl_pg_read_ops,
        .pg_write_ops                   = &bl_pg_write_ops,
};

static const struct rpc_pipe_ops bl_upcall_ops = {
        .upcall         = rpc_pipe_generic_upcall,
        .downcall       = bl_pipe_downcall,
        .destroy_msg    = bl_pipe_destroy_msg,
};

static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
                                            struct rpc_pipe *pipe)
{
        struct dentry *dir, *dentry;

        dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
        if (dir == NULL)
                return ERR_PTR(-ENOENT);
        dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
        dput(dir);
        return dentry;
}

static void nfs4blocklayout_unregister_sb(struct super_block *sb,
                                          struct rpc_pipe *pipe)
{
        if (pipe->dentry)
                rpc_unlink(pipe->dentry);
}

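/*
 * rpc_pipefs mount/umount notifier: creates or removes the per-net
 * "blocklayout" pipe through which the userspace helper answers
 * device-resolution upcalls.
 */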
static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
                           void *ptr)
{
        struct super_block *sb = ptr;
        struct net *net = sb->s_fs_info;
        struct nfs_net *nn = net_generic(net, nfs_net_id);
        struct dentry *dentry;
        int ret = 0;

        if (!try_module_get(THIS_MODULE))
                return 0;

        if (nn->bl_device_pipe == NULL) {
                module_put(THIS_MODULE);
                return 0;
        }

        switch (event) {
        case RPC_PIPEFS_MOUNT:
                dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
                if (IS_ERR(dentry)) {
                        ret = PTR_ERR(dentry);
                        break;
                }
                nn->bl_device_pipe->dentry = dentry;
                break;
        case RPC_PIPEFS_UMOUNT:
                if (nn->bl_device_pipe->dentry)
                        nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
                break;
        default:
                ret = -ENOTSUPP;
                break;
        }
        module_put(THIS_MODULE);
        return ret;
}

static struct notifier_block nfs4blocklayout_block = {
        .notifier_call = rpc_pipefs_event,
};

static struct dentry *nfs4blocklayout_register_net(struct net *net,
                                                   struct rpc_pipe *pipe)
{
        struct super_block *pipefs_sb;
        struct dentry *dentry;

        pipefs_sb = rpc_get_sb_net(net);
        if (!pipefs_sb)
                return NULL;
        dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
        rpc_put_sb_net(net);
        return dentry;
}

static void nfs4blocklayout_unregister_net(struct net *net,
                                           struct rpc_pipe *pipe)
{
        struct super_block *pipefs_sb;

        pipefs_sb = rpc_get_sb_net(net);
        if (pipefs_sb) {
                nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
                rpc_put_sb_net(net);
        }
}

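/*
 * Per-network-namespace setup: create the device upcall pipe and, if
 * rpc_pipefs is already mounted in this namespace, register it there
 * (nfs4blocklayout_register_net() returns NULL, not an error, when no
 * pipefs superblock exists yet; the notifier above handles that case).
 */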
static int nfs4blocklayout_net_init(struct net *net)
{
        struct nfs_net *nn = net_generic(net, nfs_net_id);
        struct dentry *dentry;

        init_waitqueue_head(&nn->bl_wq);
        nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
        if (IS_ERR(nn->bl_device_pipe))
                return PTR_ERR(nn->bl_device_pipe);
        dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
        if (IS_ERR(dentry)) {
                rpc_destroy_pipe_data(nn->bl_device_pipe);
                return PTR_ERR(dentry);
        }
        nn->bl_device_pipe->dentry = dentry;
        return 0;
}

static void nfs4blocklayout_net_exit(struct net *net)
{
        struct nfs_net *nn = net_generic(net, nfs_net_id);

        nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
        rpc_destroy_pipe_data(nn->bl_device_pipe);
        nn->bl_device_pipe = NULL;
}

static struct pernet_operations nfs4blocklayout_net_ops = {
        .init = nfs4blocklayout_net_init,
        .exit = nfs4blocklayout_net_exit,
};

static int __init nfs4blocklayout_init(void)
{
        int ret;

        dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

        ret = pnfs_register_layoutdriver(&blocklayout_type);
        if (ret)
                goto out;

        ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block);
        if (ret)
                goto out_remove;
        ret = register_pernet_subsys(&nfs4blocklayout_net_ops);
        if (ret)
                goto out_notifier;
out:
        return ret;

out_notifier:
        rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
out_remove:
        pnfs_unregister_layoutdriver(&blocklayout_type);
        return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
        dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
               __func__);

        rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
        unregister_pernet_subsys(&nfs4blocklayout_net_ops);
        pnfs_unregister_layoutdriver(&blocklayout_type);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);