/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"
#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");
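
/*
 * A "hole" is an extent that reads as zeroes without touching the device:
 * either no space is provisioned at all (NONE_DATA), or the space is
 * provisioned but nothing has been written to it yet (INVALID_DATA with
 * no tag set; a set be_tag means the range has been written).
 */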
static bool is_hole(struct pnfs_block_extent *be)
{
	switch (be->be_state) {
	case PNFS_BLOCK_NONE_DATA:
		return true;
	case PNFS_BLOCK_INVALID_DATA:
		return be->be_tag ? false : true;
	default:
		return false;
	}
}
/* The data we are handed might be spread across several bios. We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data);
	void *data;
};
static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}
static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}
static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}
static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}
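
/*
 * Each bio carries a reference on the parallel_io in its bi_private
 * pointer: the reference is taken here, just before submission, and
 * dropped by the bio's end_io handler.  Together with the initial
 * reference held by the pagelist function, this ensures pnfs_callback
 * fires exactly once, after the last in-flight bio completes.
 */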
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}
static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
				     struct pnfs_block_extent *be,
				     void (*end_io)(struct bio *, int err),
				     struct parallel_io *par)
{
	struct pnfs_block_dev *dev =
		container_of(be->be_device, struct pnfs_block_dev, d_node);
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_iter.bi_sector = isect - be->be_f_offset +
			be->be_v_offset;
		bio->bi_bdev = dev->d_bdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}
static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par,
				      unsigned int offset, int len)
{
	isect = isect + (offset >> SECTOR_SHIFT);
	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, len);
retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, len, offset) < len) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}
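
/*
 * Read bio completion: record the first error seen, mark the layout
 * segment failed so subsequent I/O falls back to the MDS, and drop
 * this bio's reference on the parallel_io.
 */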
static void bl_end_io_read(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;

	if (err) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}

	bio_put(bio);
	put_parallel(par);
}
static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	pnfs_ld_read_done(hdr);
}
static void
bl_end_par_io_read(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}
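
/*
 * Read path: walk the request pages, looking up the block extent that
 * backs each page.  Pages backed by a hole are zero-filled in place
 * without touching the device; everything else is batched into as few
 * bios as possible, with the parallel_io refcount tracking overall
 * completion.
 */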
static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *header)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = header->args.offset;
	size_t bytes_left = header->args.count;
	unsigned int pg_offset, pg_len;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
	const bool is_dio = (header->dreq != NULL);
	struct blk_plug plug;
	int i;

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		header->page_array.npages, f_offset,
		(unsigned int)header->args.count);

	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_read;

	blk_start_plug(&plug);

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(READ, bio);

			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, false)) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		pg_offset = f_offset & ~PAGE_CACHE_MASK;
		if (is_dio) {
			if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
				pg_len = PAGE_CACHE_SIZE - pg_offset;
			else
				pg_len = bytes_left;

			f_offset += pg_len;
			bytes_left -= pg_len;
			isect += (pg_offset >> SECTOR_SHIFT);
			extent_length -= (pg_offset >> SECTOR_SHIFT);
		} else {
			BUG_ON(pg_offset != 0);
			pg_len = PAGE_CACHE_SIZE;
		}

		if (is_hole(&be)) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);
		} else {
			bio = do_add_page_to_bio(bio,
						 header->page_array.npages - i,
						 READ,
						 isect, pages[i], &be,
						 bl_end_io_read, par,
						 pg_offset, pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
	}
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		header->res.eof = 1;
		header->res.count = header->inode->i_size - header->args.offset;
	} else {
		header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
	}
out:
	bl_submit_bio(READ, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}
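
/*
 * Write bio completion: like bl_end_io_read, but keyed off the
 * BIO_UPTODATE flag rather than the error argument.
 */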
static void bl_end_io_write(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nfs_pgio_header *header = par->data;

	if (!uptodate) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}
/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
	struct nfs_pgio_header *hdr =
		container_of(task, struct nfs_pgio_header, task);

	dprintk("%s enter\n", __func__);

	if (likely(!hdr->pnfs_error)) {
		struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
		u64 start = hdr->args.offset & (loff_t)PAGE_CACHE_MASK;
		u64 end = (hdr->args.offset + hdr->args.count +
			PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK;

		ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
					(end - start) >> SECTOR_SHIFT);
	}

	pnfs_ld_write_done(hdr);
}
/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	hdr->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}
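
/*
 * Write path: like bl_read_pagelist, but whole pages only.  Requests
 * are expected to be page-aligned here; unaligned direct writes were
 * already redirected to the MDS by bl_pg_init_write.
 */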
static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = header->args.offset;
	size_t count = header->args.count;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
	struct blk_plug plug;
	int i;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);

	/* At this point, header->page_array is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_write;

	blk_start_plug(&plug);

	/* we always write out the whole page */
	offset = offset & (loff_t)PAGE_CACHE_MASK;
	isect = offset >> SECTOR_SHIFT;

	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, true)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}

			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
					 WRITE, isect, pages[i], &be,
					 bl_end_io_write, par,
					 0, PAGE_CACHE_SIZE);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
		offset += PAGE_CACHE_SIZE;
		count -= PAGE_CACHE_SIZE;
		isect += PAGE_CACHE_SECTORS;
		extent_length -= PAGE_CACHE_SECTORS;
	}

	header->res.count = header->args.count;
out:
	bl_submit_bio(WRITE, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}
static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	int err;

	dprintk("%s enter\n", __func__);

	err = ext_tree_remove(bl, true, 0, LLONG_MAX);
	WARN_ON(err);

	kfree(bl);
}
static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;

	bl->bl_ext_rw = RB_ROOT;
	bl->bl_ext_ro = RB_ROOT;
	spin_lock_init(&bl->bl_ext_lock);

	return &bl->bl_layout;
}
static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}
/* Tracks info needed to ensure extents in layout obey constraints of spec */
struct layout_verification {
	u32 mode;	/* R or RW */
	u64 start;	/* Expected start of next non-COW extent */
	u64 inval;	/* Start of INVAL coverage */
	u64 cowread;	/* End of COW read coverage */
};
/* Verify the extent meets the layout requirements of the pnfs-block draft,
 * section 2.3.1.
 */
static int verify_extent(struct pnfs_block_extent *be,
			 struct layout_verification *lv)
{
	if (lv->mode == IOMODE_READ) {
		if (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		    be->be_state == PNFS_BLOCK_INVALID_DATA)
			return -EIO;
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	}
	/* lv->mode == IOMODE_RW */
	if (be->be_state == PNFS_BLOCK_READWRITE_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		if (lv->cowread > lv->start)
			return -EIO;
		lv->start += be->be_length;
		lv->inval = lv->start;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_READ_DATA) {
		if (be->be_f_offset > lv->start)
			return -EIO;
		if (be->be_f_offset < lv->inval)
			return -EIO;
		if (be->be_f_offset < lv->cowread)
			return -EIO;
		/* It looks like you might want to min this with lv->start,
		 * but you really don't.
		 */
		lv->inval = lv->inval + be->be_length;
		lv->cowread = be->be_f_offset + be->be_length;
		return 0;
	} else
		return -EIO;
}
static int decode_sector_number(__be32 **rp, sector_t *sp)
{
	uint64_t s;

	*rp = xdr_decode_hyper(*rp, &s);
	if (s & 0x1ff) {
		printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__);
		return -1;
	}
	*sp = s >> SECTOR_SHIFT;
	return 0;
}
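
/*
 * On the wire each extent is a deviceid4 followed by three 64-bit
 * values (file offset, length, storage offset) and a 32-bit state:
 * NFS4_DEVICEID4_SIZE + 3 * 8 + 4 bytes per extent, which is where
 * the (28 + NFS4_DEVICEID4_SIZE) * count decode below comes from.
 */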
/* XDR decode pnfs_block_layout4 structure */
static int
nfs4_blk_process_layoutget(struct pnfs_layout_hdr *lo,
		struct nfs4_layoutget_res *lgr, gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	int i, status = -EIO;
	uint32_t count;
	struct pnfs_block_extent *be = NULL, *save;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	__be32 *p;
	struct layout_verification lv = {
		.mode = lgr->range.iomode,
		.start = lgr->range.offset >> SECTOR_SHIFT,
		.inval = lgr->range.offset >> SECTOR_SHIFT,
		.cowread = lgr->range.offset >> SECTOR_SHIFT,
	};
	LIST_HEAD(extents);

	dprintk("---> %s\n", __func__);

	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return -ENOMEM;

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	p = xdr_inline_decode(&stream, 4);
	if (unlikely(!p))
		goto out;

	count = be32_to_cpup(p++);
	dprintk("%s enter, number of extents %i\n", __func__, count);
	p = xdr_inline_decode(&stream, (28 + NFS4_DEVICEID4_SIZE) * count);
	if (unlikely(!p))
		goto out;

	/* Decode individual extents, putting them in temporary
	 * staging area until whole layout is decoded to make error
	 * recovery easier.
	 */
	for (i = 0; i < count; i++) {
		struct nfs4_deviceid id;

		be = kzalloc(sizeof(struct pnfs_block_extent), GFP_NOFS);
		if (!be) {
			status = -ENOMEM;
			goto out_free_list;
		}
		memcpy(&id, p, NFS4_DEVICEID4_SIZE);
		p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);

		be->be_device =
			nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
					lo->plh_lc_cred, gfp_flags);
		if (!be->be_device)
			goto out_free_be;

		/* The next three values are read in as bytes,
		 * but stored as 512-byte sector lengths
		 */
		if (decode_sector_number(&p, &be->be_f_offset) < 0)
			goto out_put_deviceid;
		if (decode_sector_number(&p, &be->be_length) < 0)
			goto out_put_deviceid;
		if (decode_sector_number(&p, &be->be_v_offset) < 0)
			goto out_put_deviceid;
		be->be_state = be32_to_cpup(p++);
		if (verify_extent(be, &lv)) {
			dprintk("%s verify failed\n", __func__);
			goto out_put_deviceid;
		}
		list_add_tail(&be->be_list, &extents);
	}

	if (lgr->range.offset + lgr->range.length !=
			lv.start << SECTOR_SHIFT) {
		dprintk("%s Final length mismatch\n", __func__);
		goto out_free_list;
	}
	if (lv.start < lv.cowread) {
		dprintk("%s Final uncovered COW extent\n", __func__);
		goto out_free_list;
	}

	/* Extents decoded properly, now try to merge them in to
	 * existing layout extents.
	 */
	list_for_each_entry_safe(be, save, &extents, be_list) {
		list_del(&be->be_list);

		status = ext_tree_insert(bl, be);
		if (status)
			goto out_put_deviceid;
	}
	status = 0;
out:
	__free_page(scratch);
	dprintk("%s returns %i\n", __func__, status);
	return status;

out_put_deviceid:
	nfs4_put_deviceid_node(be->be_device);
out_free_be:
	kfree(be);
out_free_list:
	while (!list_empty(&extents)) {
		be = list_first_entry(&extents, struct pnfs_block_extent,
				be_list);
		list_del(&be->be_list);
		nfs4_put_deviceid_node(be->be_device);
		kfree(be);
	}
	goto out;
}
/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
						 struct nfs4_layoutget_res *lgr,
						 gfp_t gfp_flags)
{
	struct pnfs_layout_segment *lseg;
	int status;

	dprintk("%s enter\n", __func__);
	lseg = kzalloc(sizeof(*lseg), gfp_flags);
	if (!lseg)
		return ERR_PTR(-ENOMEM);
	status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
	if (status) {
		/* We don't want to call the full-blown bl_free_lseg,
		 * since on error extents were not touched.
		 */
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}
static void
bl_return_range(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	sector_t offset = range->offset >> SECTOR_SHIFT, end;
	int err;

	if (range->offset % 8) {
		dprintk("%s: offset %lld not block size aligned\n",
			__func__, range->offset);
		return;
	}

	if (range->length != NFS4_MAX_UINT64) {
		if (range->length % 8) {
			dprintk("%s: length %lld not block size aligned\n",
				__func__, range->length);
			return;
		}

		end = offset + (range->length >> SECTOR_SHIFT);
	} else {
		end = round_down(NFS4_MAX_UINT64, PAGE_SIZE);
	}

	err = ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end);
}
static int
bl_prepare_layoutcommit(struct nfs4_layoutcommit_args *arg)
{
	return ext_tree_prepare_commit(arg);
}
static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	ext_tree_mark_committed(&lcdata->args, lcdata->res.status);
}
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	if (server->pnfs_blksize > PAGE_SIZE) {
		printk(KERN_ERR "%s: pNFS blksize %d not supported.\n",
			__func__, server->pnfs_blksize);
		return -EINVAL;
	}

	return 0;
}
static bool
is_aligned_req(struct nfs_pageio_descriptor *pgio,
		struct nfs_page *req, unsigned int alignment)
{
	/*
	 * Always accept buffered writes, higher layers take care of the
	 * right alignment.
	 */
	if (pgio->pg_dreq == NULL)
		return true;

	if (!IS_ALIGNED(req->wb_offset, alignment))
		return false;

	if (IS_ALIGNED(req->wb_bytes, alignment))
		return true;

	if (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode)) {
		/*
		 * If the write goes up to the inode size, just write
		 * the full page.  Data past the inode size is
		 * guaranteed to be zeroed by the higher level client
		 * code, and this behaviour is mandated by RFC 5663
		 * section 2.3.2.
		 */
		return true;
	}

	return false;
}
static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE)) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}

	pnfs_generic_pg_init_read(pgio, req);
}
/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}
/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t end;

	/* Optimize common case that writes from 0 to end of file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
	if (end != NFS_I(inode)->npages) {
		rcu_read_lock();
		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
		rcu_read_unlock();
	}

	if (!end)
		return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
	else
		return (end - idx) << PAGE_CACHE_SHIFT;
}
static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 wb_size;

	if (!is_aligned_req(pgio, req, PAGE_SIZE)) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}

	if (pgio->pg_dreq == NULL)
		wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
					      req->wb_index);
	else
		wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pnfs_generic_pg_init_write(pgio, req, wb_size);
}
/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, PAGE_SIZE))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}
static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
};
static struct pnfs_layoutdriver_type blocklayout_type = {
	.id = LAYOUT_BLOCK_VOLUME,
	.name = "LAYOUT_BLOCK_VOLUME",
	.owner = THIS_MODULE,
	.flags = PNFS_LAYOUTRET_ON_SETATTR |
		 PNFS_READ_WHOLE_PAGE,
	.read_pagelist = bl_read_pagelist,
	.write_pagelist = bl_write_pagelist,
	.alloc_layout_hdr = bl_alloc_layout_hdr,
	.free_layout_hdr = bl_free_layout_hdr,
	.alloc_lseg = bl_alloc_lseg,
	.free_lseg = bl_free_lseg,
	.return_range = bl_return_range,
	.prepare_layoutcommit = bl_prepare_layoutcommit,
	.cleanup_layoutcommit = bl_cleanup_layoutcommit,
	.set_layoutdriver = bl_set_layoutdriver,
	.alloc_deviceid_node = bl_alloc_deviceid_node,
	.free_deviceid_node = bl_free_deviceid_node,
	.pg_read_ops = &bl_pg_read_ops,
	.pg_write_ops = &bl_pg_write_ops,
};
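
/*
 * The rpc_pipefs pipe below is the driver's upcall channel to
 * userspace; it is used to ask a userspace helper (historically the
 * blkmapd daemon from nfs-utils) to resolve block device signatures
 * from GETDEVICEINFO into usable device nodes.
 */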
static const struct rpc_pipe_ops bl_upcall_ops = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= bl_pipe_downcall,
	.destroy_msg	= bl_pipe_destroy_msg,
};
static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
						  struct rpc_pipe *pipe)
{
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
	if (dir == NULL)
		return ERR_PTR(-ENOENT);
	dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
	dput(dir);
	return dentry;
}
static void nfs4blocklayout_unregister_sb(struct super_block *sb,
					  struct rpc_pipe *pipe)
{
	if (pipe->dentry)
		rpc_unlink(pipe->dentry);
}
static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct super_block *sb = ptr;
	struct net *net = sb->s_fs_info;
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct dentry *dentry;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return 0;

	if (nn->bl_device_pipe == NULL) {
		module_put(THIS_MODULE);
		return 0;
	}

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
		if (IS_ERR(dentry)) {
			ret = PTR_ERR(dentry);
			break;
		}
		nn->bl_device_pipe->dentry = dentry;
		break;
	case RPC_PIPEFS_UMOUNT:
		if (nn->bl_device_pipe->dentry)
			nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
		break;
	default:
		ret = -ENOTSUPP;
		break;
	}
	module_put(THIS_MODULE);
	return ret;
}
static struct notifier_block nfs4blocklayout_block = {
	.notifier_call = rpc_pipefs_event,
};
static struct dentry *nfs4blocklayout_register_net(struct net *net,
						   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;
	struct dentry *dentry;

	pipefs_sb = rpc_get_sb_net(net);
	if (!pipefs_sb)
		return NULL;
	dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
	rpc_put_sb_net(net);
	return dentry;
}
static void nfs4blocklayout_unregister_net(struct net *net,
					   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
		rpc_put_sb_net(net);
	}
}
static int nfs4blocklayout_net_init(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct dentry *dentry;

	init_waitqueue_head(&nn->bl_wq);
	nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
	if (IS_ERR(nn->bl_device_pipe))
		return PTR_ERR(nn->bl_device_pipe);
	dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
	if (IS_ERR(dentry)) {
		rpc_destroy_pipe_data(nn->bl_device_pipe);
		return PTR_ERR(dentry);
	}
	nn->bl_device_pipe->dentry = dentry;
	return 0;
}
static void nfs4blocklayout_net_exit(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);

	nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
	rpc_destroy_pipe_data(nn->bl_device_pipe);
	nn->bl_device_pipe = NULL;
}
static struct pernet_operations nfs4blocklayout_net_ops = {
	.init = nfs4blocklayout_net_init,
	.exit = nfs4blocklayout_net_exit,
};
static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;

	ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block);
	if (ret)
		goto out_unregister_driver;
	ret = register_pernet_subsys(&nfs4blocklayout_net_ops);
	if (ret)
		goto out_unregister_notifier;

	return 0;

out_unregister_notifier:
	rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
out_unregister_driver:
	pnfs_unregister_layoutdriver(&blocklayout_type);
out:
	return ret;
}
static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
	       __func__);

	rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
	unregister_pernet_subsys(&nfs4blocklayout_net_ops);
	pnfs_unregister_layoutdriver(&blocklayout_type);
}
MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);