git.kernelconcepts.de Git - karo-tx-linux.git/blobdiff - fs/nfs/pnfs.c
pnfs/blocklayout: include vmalloc.h for __vmalloc
[karo-tx-linux.git] / fs / nfs / pnfs.c
index a8914b3356174a5063369452bc73fab7c0b8db2e..76de7f568119c7739c2f72fea155db3ee0d1bbec 100644 (file)
@@ -361,6 +361,23 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
 }
 EXPORT_SYMBOL_GPL(pnfs_put_lseg);
 
+static void pnfs_put_lseg_async_work(struct work_struct *work)
+{
+       struct pnfs_layout_segment *lseg;
+
+       lseg = container_of(work, struct pnfs_layout_segment, pls_work);
+
+       pnfs_put_lseg(lseg);
+}
+
+void
+pnfs_put_lseg_async(struct pnfs_layout_segment *lseg)
+{
+       INIT_WORK(&lseg->pls_work, pnfs_put_lseg_async_work);
+       schedule_work(&lseg->pls_work);
+}
+EXPORT_SYMBOL_GPL(pnfs_put_lseg_async);
+
 static u64
 end_offset(u64 start, u64 len)
 {
@@ -577,6 +594,9 @@ pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
                dprintk("%s freeing layout for inode %lu\n", __func__,
                        lo->plh_inode->i_ino);
                inode = lo->plh_inode;
+
+               pnfs_layoutcommit_inode(inode, false);
+
                spin_lock(&inode->i_lock);
                list_del_init(&lo->plh_bulk_destroy);
                lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
@@ -665,17 +685,6 @@ static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
        return (s32)(s1 - s2) > 0;
 }
 
-static void
-pnfs_verify_layout_stateid(struct pnfs_layout_hdr *lo,
-               const nfs4_stateid *new,
-               struct list_head *free_me_list)
-{
-       if (nfs4_stateid_match_other(&lo->plh_stateid, new))
-               return;
-       /* Layout is new! Kill existing layout segments */
-       pnfs_mark_matching_lsegs_invalid(lo, free_me_list, NULL);
-}
-
 /* update lo->plh_stateid with new if is more recent */
 void
 pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
@@ -732,7 +741,8 @@ pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
                status = -EAGAIN;
        } else if (!nfs4_valid_open_stateid(open_state)) {
                status = -EBADF;
-       } else if (list_empty(&lo->plh_segs)) {
+       } else if (list_empty(&lo->plh_segs) ||
+                  test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
                int seq;
 
                do {
@@ -847,6 +857,16 @@ _pnfs_return_layout(struct inode *ino)
        empty = list_empty(&lo->plh_segs);
        pnfs_clear_layoutcommit(ino, &tmp_list);
        pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
+
+       if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
+               struct pnfs_layout_range range = {
+                       .iomode         = IOMODE_ANY,
+                       .offset         = 0,
+                       .length         = NFS4_MAX_UINT64,
+               };
+               NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
+       }
+
        /* Don't send a LAYOUTRETURN if list was initially empty */
        if (empty) {
                spin_unlock(&ino->i_lock);
@@ -854,6 +874,8 @@ _pnfs_return_layout(struct inode *ino)
                dprintk("NFS: %s no layout segments to return\n", __func__);
                goto out;
        }
+
+       set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
        lo->plh_block_lgets++;
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&tmp_list);
@@ -1341,25 +1363,41 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
                goto out;
        }
 
+       init_lseg(lo, lseg);
+       lseg->pls_range = res->range;
+
        spin_lock(&ino->i_lock);
        if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
                dprintk("%s forget reply due to recall\n", __func__);
                goto out_forget_reply;
        }
 
-       if (pnfs_layoutgets_blocked(lo, 1) ||
-           pnfs_layout_stateid_blocked(lo, &res->stateid)) {
+       if (pnfs_layoutgets_blocked(lo, 1)) {
                dprintk("%s forget reply due to state\n", __func__);
                goto out_forget_reply;
        }
 
-       /* Check that the new stateid matches the old stateid */
-       pnfs_verify_layout_stateid(lo, &res->stateid, &free_me);
-       /* Done processing layoutget. Set the layout stateid */
-       pnfs_set_layout_stateid(lo, &res->stateid, false);
+       if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
+               /* existing state ID, make sure the sequence number matches. */
+               if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
+                       dprintk("%s forget reply due to sequence\n", __func__);
+                       goto out_forget_reply;
+               }
+               pnfs_set_layout_stateid(lo, &res->stateid, false);
+       } else {
+               /*
+                * We got an entirely new state ID.  Mark all segments for the
+                * inode invalid, and don't bother validating the stateid
+                * sequence number.
+                */
+               pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL);
+
+               nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
+               lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
+       }
+
+       clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
 
-       init_lseg(lo, lseg);
-       lseg->pls_range = res->range;
        pnfs_get_lseg(lseg);
        pnfs_layout_insert_lseg(lo, lseg);
 
@@ -1470,41 +1508,19 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
 
-int pnfs_write_done_resend_to_mds(struct inode *inode,
-                               struct list_head *head,
-                               const struct nfs_pgio_completion_ops *compl_ops,
-                               struct nfs_direct_req *dreq)
+int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
 {
        struct nfs_pageio_descriptor pgio;
-       LIST_HEAD(failed);
 
        /* Resend all requests through the MDS */
-       nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, true, compl_ops);
-       pgio.pg_dreq = dreq;
-       while (!list_empty(head)) {
-               struct nfs_page *req = nfs_list_entry(head->next);
-
-               nfs_list_remove_request(req);
-               if (!nfs_pageio_add_request(&pgio, req))
-                       nfs_list_add_request(req, &failed);
-       }
-       nfs_pageio_complete(&pgio);
-
-       if (!list_empty(&failed)) {
-               /* For some reason our attempt to resend pages. Mark the
-                * overall send request as having failed, and let
-                * nfs_writeback_release_full deal with the error.
-                */
-               list_move(&failed, head);
-               return -EIO;
-       }
-       return 0;
+       nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
+                             hdr->completion_ops);
+       return nfs_pageio_resend(&pgio, hdr);
 }
 EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
 
-static void pnfs_ld_handle_write_error(struct nfs_pgio_data *data)
+static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
 {
-       struct nfs_pgio_header *hdr = data->header;
 
        dprintk("pnfs write error = %d\n", hdr->pnfs_error);
        if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
@@ -1512,50 +1528,42 @@ static void pnfs_ld_handle_write_error(struct nfs_pgio_data *data)
                pnfs_return_layout(hdr->inode);
        }
        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
-               data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
-                                                       &hdr->pages,
-                                                       hdr->completion_ops,
-                                                       hdr->dreq);
+               hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
 }
 
 /*
  * Called by non rpc-based layout drivers
  */
-void pnfs_ld_write_done(struct nfs_pgio_data *data)
+void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
 {
-       struct nfs_pgio_header *hdr = data->header;
-
-       trace_nfs4_pnfs_write(data, hdr->pnfs_error);
+       trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
        if (!hdr->pnfs_error) {
-               pnfs_set_layoutcommit(data);
-               hdr->mds_ops->rpc_call_done(&data->task, data);
+               pnfs_set_layoutcommit(hdr);
+               hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
        } else
-               pnfs_ld_handle_write_error(data);
-       hdr->mds_ops->rpc_release(data);
+               pnfs_ld_handle_write_error(hdr);
+       hdr->mds_ops->rpc_release(hdr);
 }
 EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
 
 static void
 pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
-               struct nfs_pgio_data *data)
+               struct nfs_pgio_header *hdr)
 {
-       struct nfs_pgio_header *hdr = data->header;
-
        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                list_splice_tail_init(&hdr->pages, &desc->pg_list);
                nfs_pageio_reset_write_mds(desc);
                desc->pg_recoalesce = 1;
        }
-       nfs_pgio_data_release(data);
+       nfs_pgio_data_destroy(hdr);
 }
 
 static enum pnfs_try_status
-pnfs_try_to_write_data(struct nfs_pgio_data *wdata,
+pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
                        const struct rpc_call_ops *call_ops,
                        struct pnfs_layout_segment *lseg,
                        int how)
 {
-       struct nfs_pgio_header *hdr = wdata->header;
        struct inode *inode = hdr->inode;
        enum pnfs_try_status trypnfs;
        struct nfs_server *nfss = NFS_SERVER(inode);
@@ -1563,8 +1571,8 @@ pnfs_try_to_write_data(struct nfs_pgio_data *wdata,
        hdr->mds_ops = call_ops;
 
        dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
-               inode->i_ino, wdata->args.count, wdata->args.offset, how);
-       trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
+               inode->i_ino, hdr->args.count, hdr->args.offset, how);
+       trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
        if (trypnfs != PNFS_NOT_ATTEMPTED)
                nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
        dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
@@ -1575,139 +1583,105 @@ static void
 pnfs_do_write(struct nfs_pageio_descriptor *desc,
              struct nfs_pgio_header *hdr, int how)
 {
-       struct nfs_pgio_data *data = hdr->data;
        const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
        struct pnfs_layout_segment *lseg = desc->pg_lseg;
        enum pnfs_try_status trypnfs;
 
        desc->pg_lseg = NULL;
-       trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
+       trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
        if (trypnfs == PNFS_NOT_ATTEMPTED)
-               pnfs_write_through_mds(desc, data);
+               pnfs_write_through_mds(desc, hdr);
        pnfs_put_lseg(lseg);
 }
 
 static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
 {
        pnfs_put_lseg(hdr->lseg);
-       nfs_rw_header_free(hdr);
+       nfs_pgio_header_free(hdr);
 }
 EXPORT_SYMBOL_GPL(pnfs_writehdr_free);
 
 int
 pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
 {
-       struct nfs_rw_header *whdr;
        struct nfs_pgio_header *hdr;
        int ret;
 
-       whdr = nfs_rw_header_alloc(desc->pg_rw_ops);
-       if (!whdr) {
+       hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
+       if (!hdr) {
                desc->pg_completion_ops->error_cleanup(&desc->pg_list);
                pnfs_put_lseg(desc->pg_lseg);
                desc->pg_lseg = NULL;
                return -ENOMEM;
        }
-       hdr = &whdr->header;
        nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
        hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
-       atomic_inc(&hdr->refcnt);
        ret = nfs_generic_pgio(desc, hdr);
        if (ret != 0) {
                pnfs_put_lseg(desc->pg_lseg);
                desc->pg_lseg = NULL;
        } else
                pnfs_do_write(desc, hdr, desc->pg_ioflags);
-       if (atomic_dec_and_test(&hdr->refcnt))
-               hdr->completion_ops->completion(hdr);
        return ret;
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
 
-int pnfs_read_done_resend_to_mds(struct inode *inode,
-                               struct list_head *head,
-                               const struct nfs_pgio_completion_ops *compl_ops,
-                               struct nfs_direct_req *dreq)
+int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
 {
        struct nfs_pageio_descriptor pgio;
-       LIST_HEAD(failed);
 
        /* Resend all requests through the MDS */
-       nfs_pageio_init_read(&pgio, inode, true, compl_ops);
-       pgio.pg_dreq = dreq;
-       while (!list_empty(head)) {
-               struct nfs_page *req = nfs_list_entry(head->next);
-
-               nfs_list_remove_request(req);
-               if (!nfs_pageio_add_request(&pgio, req))
-                       nfs_list_add_request(req, &failed);
-       }
-       nfs_pageio_complete(&pgio);
-
-       if (!list_empty(&failed)) {
-               list_move(&failed, head);
-               return -EIO;
-       }
-       return 0;
+       nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
+       return nfs_pageio_resend(&pgio, hdr);
 }
 EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
 
-static void pnfs_ld_handle_read_error(struct nfs_pgio_data *data)
+static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
 {
-       struct nfs_pgio_header *hdr = data->header;
-
        dprintk("pnfs read error = %d\n", hdr->pnfs_error);
        if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
            PNFS_LAYOUTRET_ON_ERROR) {
                pnfs_return_layout(hdr->inode);
        }
        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
-               data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
-                                                       &hdr->pages,
-                                                       hdr->completion_ops,
-                                                       hdr->dreq);
+               hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
 }
 
 /*
  * Called by non rpc-based layout drivers
  */
-void pnfs_ld_read_done(struct nfs_pgio_data *data)
+void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
 {
-       struct nfs_pgio_header *hdr = data->header;
-
-       trace_nfs4_pnfs_read(data, hdr->pnfs_error);
+       trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
        if (likely(!hdr->pnfs_error)) {
-               __nfs4_read_done_cb(data);
-               hdr->mds_ops->rpc_call_done(&data->task, data);
+               __nfs4_read_done_cb(hdr);
+               hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
        } else
-               pnfs_ld_handle_read_error(data);
-       hdr->mds_ops->rpc_release(data);
+               pnfs_ld_handle_read_error(hdr);
+       hdr->mds_ops->rpc_release(hdr);
 }
 EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
 
 static void
 pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
-               struct nfs_pgio_data *data)
+               struct nfs_pgio_header *hdr)
 {
-       struct nfs_pgio_header *hdr = data->header;
-
        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                list_splice_tail_init(&hdr->pages, &desc->pg_list);
                nfs_pageio_reset_read_mds(desc);
                desc->pg_recoalesce = 1;
        }
-       nfs_pgio_data_release(data);
+       nfs_pgio_data_destroy(hdr);
 }
 
 /*
  * Call the appropriate parallel I/O subsystem read function.
  */
 static enum pnfs_try_status
-pnfs_try_to_read_data(struct nfs_pgio_data *rdata,
+pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
                       const struct rpc_call_ops *call_ops,
                       struct pnfs_layout_segment *lseg)
 {
-       struct nfs_pgio_header *hdr = rdata->header;
        struct inode *inode = hdr->inode;
        struct nfs_server *nfss = NFS_SERVER(inode);
        enum pnfs_try_status trypnfs;
@@ -1715,9 +1689,9 @@ pnfs_try_to_read_data(struct nfs_pgio_data *rdata,
        hdr->mds_ops = call_ops;
 
        dprintk("%s: Reading ino:%lu %u@%llu\n",
-               __func__, inode->i_ino, rdata->args.count, rdata->args.offset);
+               __func__, inode->i_ino, hdr->args.count, hdr->args.offset);
 
-       trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
+       trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
        if (trypnfs != PNFS_NOT_ATTEMPTED)
                nfs_inc_stats(inode, NFSIOS_PNFS_READ);
        dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
@@ -1727,52 +1701,46 @@ pnfs_try_to_read_data(struct nfs_pgio_data *rdata,
 static void
 pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
 {
-       struct nfs_pgio_data *data = hdr->data;
        const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
        struct pnfs_layout_segment *lseg = desc->pg_lseg;
        enum pnfs_try_status trypnfs;
 
        desc->pg_lseg = NULL;
-       trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
+       trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
        if (trypnfs == PNFS_NOT_ATTEMPTED)
-               pnfs_read_through_mds(desc, data);
+               pnfs_read_through_mds(desc, hdr);
        pnfs_put_lseg(lseg);
 }
 
 static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
 {
        pnfs_put_lseg(hdr->lseg);
-       nfs_rw_header_free(hdr);
+       nfs_pgio_header_free(hdr);
 }
 EXPORT_SYMBOL_GPL(pnfs_readhdr_free);
 
 int
 pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
 {
-       struct nfs_rw_header *rhdr;
        struct nfs_pgio_header *hdr;
        int ret;
 
-       rhdr = nfs_rw_header_alloc(desc->pg_rw_ops);
-       if (!rhdr) {
+       hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
+       if (!hdr) {
                desc->pg_completion_ops->error_cleanup(&desc->pg_list);
                ret = -ENOMEM;
                pnfs_put_lseg(desc->pg_lseg);
                desc->pg_lseg = NULL;
                return ret;
        }
-       hdr = &rhdr->header;
        nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
        hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
-       atomic_inc(&hdr->refcnt);
        ret = nfs_generic_pgio(desc, hdr);
        if (ret != 0) {
                pnfs_put_lseg(desc->pg_lseg);
                desc->pg_lseg = NULL;
        } else
                pnfs_do_read(desc, hdr);
-       if (atomic_dec_and_test(&hdr->refcnt))
-               hdr->completion_ops->completion(hdr);
        return ret;
 }
 EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
@@ -1820,12 +1788,11 @@ void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
 EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
 
 void
-pnfs_set_layoutcommit(struct nfs_pgio_data *wdata)
+pnfs_set_layoutcommit(struct nfs_pgio_header *hdr)
 {
-       struct nfs_pgio_header *hdr = wdata->header;
        struct inode *inode = hdr->inode;
        struct nfs_inode *nfsi = NFS_I(inode);
-       loff_t end_pos = wdata->mds_offset + wdata->res.count;
+       loff_t end_pos = hdr->mds_offset + hdr->res.count;
        bool mark_as_dirty = false;
 
        spin_lock(&inode->i_lock);
@@ -1851,6 +1818,35 @@ pnfs_set_layoutcommit(struct nfs_pgio_data *wdata)
 }
 EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
 
+void pnfs_commit_set_layoutcommit(struct nfs_commit_data *data)
+{
+       struct inode *inode = data->inode;
+       struct nfs_inode *nfsi = NFS_I(inode);
+       bool mark_as_dirty = false;
+
+       spin_lock(&inode->i_lock);
+       if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
+               mark_as_dirty = true;
+               dprintk("%s: Set layoutcommit for inode %lu ",
+                       __func__, inode->i_ino);
+       }
+       if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &data->lseg->pls_flags)) {
+               /* references matched in nfs4_layoutcommit_release */
+               pnfs_get_lseg(data->lseg);
+       }
+       if (data->lwb > nfsi->layout->plh_lwb)
+               nfsi->layout->plh_lwb = data->lwb;
+       spin_unlock(&inode->i_lock);
+       dprintk("%s: lseg %p end_pos %llu\n",
+               __func__, data->lseg, nfsi->layout->plh_lwb);
+
+       /* if pnfs_layoutcommit_inode() runs between inode locks, the next one
+        * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
+       if (mark_as_dirty)
+               mark_inode_dirty_sync(inode);
+}
+EXPORT_SYMBOL_GPL(pnfs_commit_set_layoutcommit);
+
 void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
 {
        struct nfs_server *nfss = NFS_SERVER(data->args.inode);
@@ -1871,6 +1867,7 @@ void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
 int
 pnfs_layoutcommit_inode(struct inode *inode, bool sync)
 {
+       struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
        struct nfs4_layoutcommit_data *data;
        struct nfs_inode *nfsi = NFS_I(inode);
        loff_t end_pos;
@@ -1921,6 +1918,20 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
        data->args.lastbytewritten = end_pos - 1;
        data->res.server = NFS_SERVER(inode);
 
+       if (ld->prepare_layoutcommit) {
+               status = ld->prepare_layoutcommit(&data->args);
+               if (status) {
+                       spin_lock(&inode->i_lock);
+                       if (end_pos < nfsi->layout->plh_lwb)
+                               nfsi->layout->plh_lwb = end_pos;
+                       spin_unlock(&inode->i_lock);
+                       put_rpccred(data->cred);
+                       set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
+                       goto clear_layoutcommitting;
+               }
+       }
+
+
        status = nfs4_proc_layoutcommit(data, sync);
 out:
        if (status)