/*
 * file.c - NTFS kernel file operations.  Part of the Linux-NTFS project.
 *
 * Copyright (c) 2001-2015 Anton Altaparmakov and Tuxera Inc.
 *
 * This program/include file is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program/include file is distributed in the hope that it will be
 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program (in the main directory of the Linux-NTFS
 * distribution in the file COPYING); if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/writeback.h>

#include <asm/page.h>
#include <asm/uaccess.h>

#include "attrib.h"
#include "bitmap.h"
#include "inode.h"
#include "debug.h"
#include "lcnalloc.h"
#include "malloc.h"
#include "mft.h"
#include "ntfs.h"
/**
 * ntfs_file_open - called when an inode is about to be opened
 * @vi:		inode to be opened
 * @filp:	file structure describing the inode
 *
 * Limit file size to the page cache limit on architectures where unsigned long
 * is 32-bits. This is the most we can do for now without overflowing the page
 * cache page index. Doing it this way means we don't run into problems because
 * of existing too large files. It would be better to allow the user to read
 * the beginning of the file but I doubt very much anyone is going to hit this
 * check on a 32-bit architecture, so there is no point in adding the extra
 * complexity required to support this.
 *
 * On 64-bit architectures, the check is hopefully optimized away by the
 * compiler.
 *
 * After the check passes, just call generic_file_open() to do its work.
 */
static int ntfs_file_open(struct inode *vi, struct file *filp)
{
	if (sizeof(unsigned long) < 8) {
		if (i_size_read(vi) > MAX_LFS_FILESIZE)
			return -EOVERFLOW;
	}
	return generic_file_open(vi, filp);
}
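
/*
 * Aside (illustrative, not part of the original source): with the definition
 * of MAX_LFS_FILESIZE used by kernels of this vintage,
 *
 *	MAX_LFS_FILESIZE = ((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG - 1)) - 1
 *
 * a 32-bit machine with 4 KiB pages gets 2^43 - 1 bytes (8 TiB - 1), the
 * largest size for which every byte still has a page cache index that fits
 * the arithmetic above.  Larger files are refused at open() time by the
 * check in ntfs_file_open().
 */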
#ifdef NTFS_RW

/**
 * ntfs_attr_extend_initialized - extend the initialized size of an attribute
 * @ni:			ntfs inode of the attribute to extend
 * @new_init_size:	requested new initialized size in bytes
 *
 * Extend the initialized size of an attribute described by the ntfs inode @ni
 * to @new_init_size bytes.  This involves zeroing any non-sparse space between
 * the old initialized size and @new_init_size both in the page cache and on
 * disk (if relevant complete pages are already uptodate in the page cache then
 * these are simply marked dirty).
 *
 * As a side-effect, the file size (vfs inode->i_size) may be incremented as,
 * in the resident attribute case, it is tied to the initialized size and, in
 * the non-resident attribute case, it may not fall below the initialized size.
 *
 * Note that if the attribute is resident, we do not need to touch the page
 * cache at all.  This is because if the page cache page is not uptodate we
 * bring it uptodate later, when doing the write to the mft record since we
 * then already have the page mapped.  And if the page is uptodate, the
 * non-initialized region will already have been zeroed when the page was
 * brought uptodate and the region may in fact already have been overwritten
 * with new data via mmap() based writes, so we cannot just zero it.  And since
 * POSIX specifies that the behaviour of resizing a file whilst it is mmap()ped
 * is unspecified, we choose not to do zeroing and thus we do not need to touch
 * the page at all.  For a more detailed explanation see ntfs_truncate() in
 * fs/ntfs/inode.c.
 *
 * Return 0 on success and -errno on error.  In the case that an error is
 * encountered it is possible that the initialized size will already have been
 * incremented some way towards @new_init_size but it is guaranteed that if
 * this is the case, the necessary zeroing will also have happened and that all
 * metadata is self-consistent.
 *
 * Locking: i_mutex on the vfs inode corresponding to the ntfs inode @ni must be
 * held by the caller.
 */
static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size)
{
	s64 old_init_size;
	loff_t old_i_size;
	pgoff_t index, end_index;
	unsigned long flags;
	struct inode *vi = VFS_I(ni);
	ntfs_inode *base_ni;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a;
	ntfs_attr_search_ctx *ctx = NULL;
	struct address_space *mapping;
	struct page *page = NULL;
	u8 *kattr;
	int err;
	u32 attr_len;

	read_lock_irqsave(&ni->size_lock, flags);
	old_init_size = ni->initialized_size;
	old_i_size = i_size_read(vi);
	BUG_ON(new_init_size > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
			"old_initialized_size 0x%llx, "
			"new_initialized_size 0x%llx, i_size 0x%llx.",
			vi->i_ino, (unsigned)le32_to_cpu(ni->type),
			(unsigned long long)old_init_size,
			(unsigned long long)new_init_size, old_i_size);
	base_ni = ni;
	if (NInoAttr(ni))
		base_ni = ni->ext.base_ntfs_ino;
	/* Use goto to reduce indentation and we need the label below anyway. */
	if (NInoNonResident(ni))
		goto do_non_resident_extend;
	BUG_ON(old_init_size != old_i_size);
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	BUG_ON(old_i_size != (loff_t)attr_len);
	/*
	 * Do the zeroing in the mft record and update the attribute size in
	 * the mft record.
	 */
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	memset(kattr + attr_len, 0, new_init_size - attr_len);
	a->data.resident.value_length = cpu_to_le32((u32)new_init_size);
	/* Finally, update the sizes in the vfs and ntfs inodes. */
	write_lock_irqsave(&ni->size_lock, flags);
	i_size_write(vi, new_init_size);
	ni->initialized_size = new_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
	goto done;
do_non_resident_extend:
	/*
	 * If the new initialized size @new_init_size exceeds the current file
	 * size (vfs inode->i_size), we need to extend the file size to the
	 * new initialized size.
	 */
	if (new_init_size > old_i_size) {
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			m = NULL;
			goto err_out;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			goto err_out;
		}
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, 0, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			goto err_out;
		}
		m = ctx->mrec;
		a = ctx->attr;
		BUG_ON(!a->non_resident);
		BUG_ON(old_i_size != (loff_t)
				sle64_to_cpu(a->data.non_resident.data_size));
		a->data.non_resident.data_size = cpu_to_sle64(new_init_size);
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		/* Update the file size in the vfs inode. */
		i_size_write(vi, new_init_size);
		ntfs_attr_put_search_ctx(ctx);
		ctx = NULL;
		unmap_mft_record(base_ni);
		m = NULL;
	}
	mapping = vi->i_mapping;
	index = old_init_size >> PAGE_CACHE_SHIFT;
	end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	do {
		/*
		 * Read the page.  If the page is not present, this will zero
		 * the uninitialized regions for us.
		 */
		page = read_mapping_page(mapping, index, NULL);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto init_err_out;
		}
		if (unlikely(PageError(page))) {
			page_cache_release(page);
			err = -EIO;
			goto init_err_out;
		}
		/*
		 * Update the initialized size in the ntfs inode.  This is
		 * enough to make ntfs_writepage() work.
		 */
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT;
		if (ni->initialized_size > new_init_size)
			ni->initialized_size = new_init_size;
		write_unlock_irqrestore(&ni->size_lock, flags);
		/* Set the page dirty so it gets written out. */
		set_page_dirty(page);
		page_cache_release(page);
		/*
		 * Play nice with the vm and the rest of the system.  This is
		 * very much needed as we can potentially be modifying the
		 * initialised size from a very small value to a really huge
		 * value, e.g.
		 *	f = open(somefile, O_TRUNC);
		 *	truncate(f, 10GiB);
		 *	seek(f, 10GiB);
		 *	write(f, 1);
		 * And this would mean we would be marking dirty hundreds of
		 * thousands of pages or as in the above example more than
		 * two and a half million pages!
		 *
		 * TODO: For sparse pages could optimize this workload by using
		 * the FsMisc / MiscFs page bit as a "PageIsSparse" bit.  This
		 * would be set in readpage for sparse pages and here we would
		 * not need to mark dirty any pages which have this bit set.
		 * The only caveat is that we have to clear the bit everywhere
		 * where we allocate any clusters that lie in the page or that
		 * overlap the page.
		 *
		 * TODO: An even greater optimization would be for us to only
		 * call readpage() on pages which are not in sparse regions as
		 * determined from the runlist.  This would greatly reduce the
		 * number of pages we read and make dirty in the case of sparse
		 * files.
		 */
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (++index < end_index);
	read_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(ni->initialized_size != new_init_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	/* Now bring in sync the initialized_size in the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		goto init_err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto init_err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto init_err_out;
	}
	m = ctx->mrec;
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	a->data.non_resident.initialized_size = cpu_to_sle64(new_init_size);
done:
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Done, initialized_size 0x%llx, i_size 0x%llx.",
			(unsigned long long)new_init_size, i_size_read(vi));
	return 0;
init_err_out:
	write_lock_irqsave(&ni->size_lock, flags);
	ni->initialized_size = old_init_size;
	write_unlock_irqrestore(&ni->size_lock, flags);
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_debug("Failed.  Returning error code %i.", err);
	return err;
}
static ssize_t ntfs_prepare_file_for_write(struct file *file, loff_t *ppos,
		size_t *count)
{
	loff_t pos;
	s64 end, ll;
	ssize_t err;
	unsigned long flags;
	struct inode *vi = file_inode(file);
	ntfs_inode *base_ni, *ni = NTFS_I(vi);
	ntfs_volume *vol = ni->vol;

	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
			"0x%llx, count 0x%lx.", vi->i_ino,
			(unsigned)le32_to_cpu(ni->type),
			(unsigned long long)*ppos, (unsigned long)*count);
	/* We can write back this queue in page reclaim. */
	current->backing_dev_info = inode_to_bdi(vi);
	err = generic_write_checks(file, ppos, count, S_ISBLK(vi->i_mode));
	if (unlikely(err))
		goto out;
	/*
	 * All checks have passed.  Before we start doing any writing we want
	 * to abort any totally illegal writes.
	 */
	BUG_ON(NInoMstProtected(ni));
	BUG_ON(ni->type != AT_DATA);
	/* If file is encrypted, deny access, just like NT4. */
	if (NInoEncrypted(ni)) {
		/* Only $DATA attributes can be encrypted. */
		/*
		 * Reminder for later: Encrypted files are _always_
		 * non-resident so that the content can always be encrypted.
		 */
		ntfs_debug("Denying write access to encrypted file.");
		err = -EACCES;
		goto out;
	}
	if (NInoCompressed(ni)) {
		/* Only unnamed $DATA attribute can be compressed. */
		BUG_ON(ni->name_len);
		/*
		 * Reminder for later: If resident, the data is not actually
		 * compressed.  Only on the switch to non-resident does
		 * compression kick in.  This is in contrast to encrypted files
		 * (see above).
		 */
		ntfs_error(vi->i_sb, "Writing to compressed files is not "
				"implemented yet.  Sorry.");
		err = -EOPNOTSUPP;
		goto out;
	}
	base_ni = ni;
	if (NInoAttr(ni))
		base_ni = ni->ext.base_ntfs_ino;
	err = file_remove_suid(file);
	if (unlikely(err))
		goto out;
	/*
	 * Our ->update_time method always succeeds thus file_update_time()
	 * cannot fail either so there is no need to check the return code.
	 */
	file_update_time(file);
	pos = *ppos;
	/* The first byte after the last cluster being written to. */
	end = (pos + *count + vol->cluster_size_mask) &
			~(u64)vol->cluster_size_mask;
	/*
	 * If the write goes beyond the allocated size, extend the allocation
	 * to cover the whole of the write, rounded up to the nearest cluster.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	ll = ni->allocated_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (end > ll) {
		/*
		 * Extend the allocation without changing the data size.
		 *
		 * Note we ensure the allocation is big enough to at least
		 * write some data but we do not require the allocation to be
		 * complete, i.e. it may be partial.
		 */
		ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
		if (likely(ll >= 0)) {
			BUG_ON(pos >= ll);
			/* If the extension was partial truncate the write. */
			if (end > ll) {
				ntfs_debug("Truncating write to inode 0x%lx, "
						"attribute type 0x%x, because "
						"the allocation was only "
						"partially extended.",
						vi->i_ino, (unsigned)
						le32_to_cpu(ni->type));
				*count = ll - pos;
			}
		} else {
			err = ll;
			read_lock_irqsave(&ni->size_lock, flags);
			ll = ni->allocated_size;
			read_unlock_irqrestore(&ni->size_lock, flags);
			/* Perform a partial write if possible or fail. */
			if (pos < ll) {
				ntfs_debug("Truncating write to inode 0x%lx "
						"attribute type 0x%x, because "
						"extending the allocation "
						"failed (error %d).",
						vi->i_ino, (unsigned)
						le32_to_cpu(ni->type),
						(int)-err);
				*count = ll - pos;
			} else {
				if (err != -ENOSPC)
					ntfs_error(vi->i_sb, "Cannot perform "
							"write to inode "
							"0x%lx, attribute "
							"type 0x%x, because "
							"extending the "
							"allocation failed "
							"(error %ld).",
							vi->i_ino, (unsigned)
							le32_to_cpu(ni->type),
							(long)-err);
				else
					ntfs_debug("Cannot perform write to "
							"inode 0x%lx, "
							"attribute type 0x%x, "
							"because there is not "
							"space left.",
							vi->i_ino, (unsigned)
							le32_to_cpu(ni->type));
				goto out;
			}
		}
	}
	/*
	 * If the write starts beyond the initialized size, extend it up to the
	 * beginning of the write and initialize all non-sparse space between
	 * the old initialized size and the new one.  This automatically also
	 * increments the vfs inode->i_size to keep it above or equal to the
	 * initialized_size.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	ll = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (pos > ll) {
		/*
		 * Wait for ongoing direct i/o to complete before proceeding.
		 * New direct i/o cannot start as we hold i_mutex.
		 */
		inode_dio_wait(vi);
		err = ntfs_attr_extend_initialized(ni, pos);
		if (unlikely(err < 0))
			ntfs_error(vi->i_sb, "Cannot perform write to inode "
					"0x%lx, attribute type 0x%x, because "
					"extending the initialized size "
					"failed (error %d).", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type),
					(int)-err);
	}
out:
	return err;
}
/**
 * __ntfs_grab_cache_pages - obtain a number of locked pages
 * @mapping:	address space mapping from which to obtain page cache pages
 * @index:	starting index in @mapping at which to begin obtaining pages
 * @nr_pages:	number of page cache pages to obtain
 * @pages:	array of pages in which to return the obtained page cache pages
 * @cached_page: allocated but as yet unused page
 *
 * Obtain @nr_pages locked page cache pages from the mapping @mapping and
 * starting at index @index.
 *
 * If a page is newly created, add it to lru list
 *
 * Note, the page locks are obtained in ascending page index order.
 */
static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
		pgoff_t index, const unsigned nr_pages, struct page **pages,
		struct page **cached_page)
{
	int err, nr;

	BUG_ON(!nr_pages);
	err = nr = 0;
	do {
		pages[nr] = find_get_page_flags(mapping, index, FGP_LOCK |
				FGP_ACCESSED);
		if (!pages[nr]) {
			if (!*cached_page) {
				*cached_page = page_cache_alloc(mapping);
				if (unlikely(!*cached_page)) {
					err = -ENOMEM;
					goto err_out;
				}
			}
			err = add_to_page_cache_lru(*cached_page, mapping,
					index, GFP_KERNEL);
			if (unlikely(err)) {
				if (err == -EEXIST)
					continue;
				goto err_out;
			}
			pages[nr] = *cached_page;
			*cached_page = NULL;
		}
		index++;
		nr++;
	} while (nr < nr_pages);
out:
	return err;
err_out:
	while (nr > 0) {
		unlock_page(pages[--nr]);
		page_cache_release(pages[nr]);
	}
	goto out;
}
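
/*
 * Illustrative (hedged) usage sketch, matching the caller in
 * ntfs_perform_write() below: grab and lock the pages, then on any later
 * failure unlock and release them in reverse order.  Error handling is
 * abridged.
 *
 *	status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
 *			pages, &cached_page);
 *	if (unlikely(status))
 *		break;
 *	// ... prepare and copy data into the pages ...
 *	do {
 *		unlock_page(pages[--do_pages]);
 *		page_cache_release(pages[do_pages]);
 *	} while (do_pages);
 */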
static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = end_buffer_read_sync;
	return submit_bh(READ, bh);
}
/**
 * ntfs_prepare_pages_for_non_resident_write - prepare pages for receiving data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called for non-resident attributes from ntfs_file_buffered_write()
 * with i_mutex held on the inode (@pages[0]->mapping->host).  There are
 * @nr_pages pages in @pages which are locked but not kmap()ped.  The source
 * data has not yet been copied into the @pages.
 *
 * Need to fill any holes with actual clusters, allocate buffers if necessary,
 * ensure all the buffers are mapped, and bring uptodate any buffers that are
 * only partially being written to.
 *
 * If @nr_pages is greater than one, we are guaranteed that the cluster size is
 * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside
 * the same cluster and that they are the entirety of that cluster, and that
 * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole.
 *
 * i_size is not to be modified yet.
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
		unsigned nr_pages, s64 pos, size_t bytes)
{
	VCN vcn, highest_vcn = 0, cpos, cend, bh_cpos, bh_cend;
	LCN lcn;
	s64 bh_pos, vcn_len, end, initialized_size;
	sector_t lcn_block;
	struct page *page;
	struct inode *vi;
	ntfs_inode *ni, *base_ni = NULL;
	ntfs_volume *vol;
	runlist_element *rl, *rl2;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
	ntfs_attr_search_ctx *ctx = NULL;
	MFT_RECORD *m = NULL;
	ATTR_RECORD *a = NULL;
	unsigned long flags;
	u32 attr_rec_len = 0;
	unsigned blocksize, u;
	int err, mp_size;
	bool rl_write_locked, was_hole, is_retry;
	unsigned char blocksize_bits;
	struct {
		u8 runlist_merged:1;
		u8 mft_attr_mapped:1;
		u8 mp_rebuilt:1;
		u8 attr_switched:1;
	} status = { 0, 0, 0, 0 };

	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	BUG_ON(!*pages);
	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	vol = ni->vol;
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
			vi->i_ino, ni->type, pages[0]->index, nr_pages,
			(long long)pos, bytes);
	blocksize = vol->sb->s_blocksize;
	blocksize_bits = vol->sb->s_blocksize_bits;
	u = 0;
	do {
		page = pages[u];
		BUG_ON(!page);
		/*
		 * create_empty_buffers() will create uptodate/dirty buffers if
		 * the page is uptodate/dirty.
		 */
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, blocksize, 0);
			if (unlikely(!page_has_buffers(page)))
				return -ENOMEM;
		}
	} while (++u < nr_pages);
	rl_write_locked = false;
	rl = NULL;
	err = 0;
	vcn = lcn = -1;
	vcn_len = 0;
	lcn_block = -1;
	was_hole = false;
	cpos = pos >> vol->cluster_size_bits;
	end = pos + bytes;
	cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
	/*
	 * Loop over each page and for each page over each buffer.  Use goto to
	 * reduce indentation.
	 */
	u = 0;
do_next_page:
	page = pages[u];
	bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
	bh = head = page_buffers(page);
	do {
		VCN cdelta;
		s64 bh_end;
		unsigned bh_cofs;
		/* Clear buffer_new on all buffers to reinitialise state. */
		if (buffer_new(bh))
			clear_buffer_new(bh);
		bh_end = bh_pos + blocksize;
		bh_cpos = bh_pos >> vol->cluster_size_bits;
		bh_cofs = bh_pos & vol->cluster_size_mask;
		if (buffer_mapped(bh)) {
			/*
			 * The buffer is already mapped.  If it is uptodate,
			 * ignore it.
			 */
			if (buffer_uptodate(bh))
				continue;
			/*
			 * The buffer is not uptodate.  If the page is uptodate
			 * set the buffer uptodate and otherwise ignore it.
			 */
			if (PageUptodate(page)) {
				set_buffer_uptodate(bh);
				continue;
			}
			/*
			 * Neither the page nor the buffer are uptodate.  If
			 * the buffer is only partially being written to, we
			 * need to read it in before the write, i.e. now.
			 */
			if ((bh_pos < pos && bh_end > pos) ||
					(bh_pos < end && bh_end > end)) {
				/*
				 * If the buffer is fully or partially within
				 * the initialized size, do an actual read.
				 * Otherwise, simply zero the buffer.
				 */
				read_lock_irqsave(&ni->size_lock, flags);
				initialized_size = ni->initialized_size;
				read_unlock_irqrestore(&ni->size_lock, flags);
				if (bh_pos < initialized_size) {
					ntfs_submit_bh_for_read(bh);
					*wait_bh++ = bh;
				} else {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
			}
			continue;
		}
		/* Unmapped buffer.  Need to map it. */
		bh->b_bdev = vol->sb->s_bdev;
		/*
		 * If the current buffer is in the same clusters as the map
		 * cache, there is no need to check the runlist again.  The
		 * map cache is made up of @vcn, which is the first cached file
		 * cluster, @vcn_len which is the number of cached file
		 * clusters, @lcn is the device cluster corresponding to @vcn,
		 * and @lcn_block is the block number corresponding to @lcn.
		 */
		cdelta = bh_cpos - vcn;
		if (likely(!cdelta || (cdelta > 0 && cdelta < vcn_len))) {
map_buffer_cached:
			BUG_ON(lcn < 0);
			bh->b_blocknr = lcn_block +
					(cdelta << (vol->cluster_size_bits -
					blocksize_bits)) +
					(bh_cofs >> blocksize_bits);
			set_buffer_mapped(bh);
			/*
			 * If the page is uptodate so is the buffer.  If the
			 * buffer is fully outside the write, we ignore it if
			 * it was already allocated and we mark it dirty so it
			 * gets written out if we allocated it.  On the other
			 * hand, if we allocated the buffer but we are not
			 * marking it dirty we set buffer_new so we can do
			 * error recovery.
			 */
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
				if (unlikely(was_hole)) {
					/* We allocated the buffer. */
					unmap_underlying_metadata(bh->b_bdev,
							bh->b_blocknr);
					if (bh_end <= pos || bh_pos >= end)
						mark_buffer_dirty(bh);
					else
						set_buffer_new(bh);
				}
				continue;
			}
			/* Page is _not_ uptodate. */
			if (likely(!was_hole)) {
				/*
				 * Buffer was already allocated.  If it is not
				 * uptodate and is only partially being written
				 * to, we need to read it in before the write,
				 * i.e. now.
				 */
				if (!buffer_uptodate(bh) && bh_pos < end &&
						bh_end > pos &&
						(bh_pos < pos ||
						bh_end > end)) {
					/*
					 * If the buffer is fully or partially
					 * within the initialized size, do an
					 * actual read.  Otherwise, simply zero
					 * the buffer.
					 */
					read_lock_irqsave(&ni->size_lock,
							flags);
					initialized_size = ni->initialized_size;
					read_unlock_irqrestore(&ni->size_lock,
							flags);
					if (bh_pos < initialized_size) {
						ntfs_submit_bh_for_read(bh);
						*wait_bh++ = bh;
					} else {
						zero_user(page, bh_offset(bh),
								blocksize);
						set_buffer_uptodate(bh);
					}
				}
				continue;
			}
			/* We allocated the buffer. */
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
			/*
			 * If the buffer is fully outside the write, zero it,
			 * set it uptodate, and mark it dirty so it gets
			 * written out.  If it is partially being written to,
			 * zero region surrounding the write but leave it to
			 * commit write to do anything else.  Finally, if the
			 * buffer is fully being overwritten, do nothing.
			 */
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh)) {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
				mark_buffer_dirty(bh);
				continue;
			}
			set_buffer_new(bh);
			if (!buffer_uptodate(bh) &&
					(bh_pos < pos || bh_end > end)) {
				u8 *kaddr;
				unsigned pofs;

				kaddr = kmap_atomic(page);
				if (bh_pos < pos) {
					pofs = bh_pos & ~PAGE_CACHE_MASK;
					memset(kaddr + pofs, 0, pos - bh_pos);
				}
				if (bh_end > end) {
					pofs = end & ~PAGE_CACHE_MASK;
					memset(kaddr + pofs, 0, bh_end - end);
				}
				kunmap_atomic(kaddr);
				flush_dcache_page(page);
			}
			continue;
		}
		/*
		 * Slow path: this is the first buffer in the cluster.  If it
		 * is outside allocated size and is not uptodate, zero it and
		 * set it uptodate.
		 */
		read_lock_irqsave(&ni->size_lock, flags);
		initialized_size = ni->allocated_size;
		read_unlock_irqrestore(&ni->size_lock, flags);
		if (bh_pos > initialized_size) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			} else if (!buffer_uptodate(bh)) {
				zero_user(page, bh_offset(bh), blocksize);
				set_buffer_uptodate(bh);
			}
			continue;
		}
		is_retry = false;
		if (!rl) {
			down_read(&ni->runlist.lock);
retry_remap:
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target cluster. */
			while (rl->length && rl[1].vcn <= bh_cpos)
				rl++;
			lcn = ntfs_rl_vcn_to_lcn(rl, bh_cpos);
			if (likely(lcn >= 0)) {
				/*
				 * Successful remap, setup the map cache and
				 * use that to deal with the buffer.
				 */
				was_hole = false;
				vcn = bh_cpos;
				vcn_len = rl[1].vcn - vcn;
				lcn_block = lcn << (vol->cluster_size_bits -
						blocksize_bits);
				cdelta = 0;
				/*
				 * If the number of remaining clusters touched
				 * by the write is smaller or equal to the
				 * number of cached clusters, unlock the
				 * runlist as the map cache will be used from
				 * now on.
				 */
				if (likely(vcn + vcn_len >= cend)) {
					if (rl_write_locked) {
						up_write(&ni->runlist.lock);
						rl_write_locked = false;
					} else
						up_read(&ni->runlist.lock);
					rl = NULL;
				}
				goto map_buffer_cached;
			}
		} else
			lcn = LCN_RL_NOT_MAPPED;
		/*
		 * If it is not a hole and not out of bounds, the runlist is
		 * probably unmapped so try to map it now.
		 */
		if (unlikely(lcn != LCN_HOLE && lcn != LCN_ENOENT)) {
			if (likely(!is_retry && lcn == LCN_RL_NOT_MAPPED)) {
				/* Attempt to map runlist. */
				if (!rl_write_locked) {
					/*
					 * We need the runlist locked for
					 * writing, so if it is locked for
					 * reading relock it now and retry in
					 * case it changed whilst we dropped
					 * the lock.
					 */
					up_read(&ni->runlist.lock);
					down_write(&ni->runlist.lock);
					rl_write_locked = true;
					goto retry_remap;
				}
				err = ntfs_map_runlist_nolock(ni, bh_cpos,
						NULL);
				if (likely(!err)) {
					is_retry = true;
					goto retry_remap;
				}
				/*
				 * If @vcn is out of bounds, pretend @lcn is
				 * LCN_ENOENT.  As long as the buffer is out
				 * of bounds this will work fine.
				 */
				if (err == -ENOENT) {
					lcn = LCN_ENOENT;
					err = 0;
					goto rl_not_mapped_enoent;
				}
			} else
				err = -EIO;
			/* Failed to map the buffer, even after retrying. */
			bh->b_blocknr = -1;
			ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
					"attribute type 0x%x, vcn 0x%llx, "
					"vcn offset 0x%x, because its "
					"location on disk could not be "
					"determined%s (error code %i).",
					ni->mft_no, ni->type,
					(unsigned long long)bh_cpos,
					(unsigned)bh_pos &
					vol->cluster_size_mask,
					is_retry ? " even after retrying" : "",
					err);
			break;
		}
rl_not_mapped_enoent:
		/*
		 * The buffer is in a hole or out of bounds.  We need to fill
		 * the hole, unless the buffer is in a cluster which is not
		 * touched by the write, in which case we just leave the buffer
		 * unmapped.  This can only happen when the cluster size is
		 * less than the page cache size.
		 */
		if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) {
			bh_cend = (bh_end + vol->cluster_size - 1) >>
					vol->cluster_size_bits;
			if ((bh_cend <= cpos || bh_cpos >= cend)) {
				bh->b_blocknr = -1;
				/*
				 * If the buffer is uptodate we skip it.  If it
				 * is not but the page is uptodate, we can set
				 * the buffer uptodate.  If the page is not
				 * uptodate, we can clear the buffer and set it
				 * uptodate.  Whether this is worthwhile is
				 * debatable and this could be removed.
				 */
				if (PageUptodate(page)) {
					if (!buffer_uptodate(bh))
						set_buffer_uptodate(bh);
				} else if (!buffer_uptodate(bh)) {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
				continue;
			}
		}
		/*
		 * Out of bounds buffer is invalid if it was not really out of
		 * bounds.
		 */
		BUG_ON(lcn != LCN_HOLE);
		/*
		 * We need the runlist locked for writing, so if it is locked
		 * for reading relock it now and retry in case it changed
		 * whilst we dropped the lock.
		 */
		BUG_ON(!rl);
		if (!rl_write_locked) {
			up_read(&ni->runlist.lock);
			down_write(&ni->runlist.lock);
			rl_write_locked = true;
			goto retry_remap;
		}
		/* Find the previous last allocated cluster. */
		BUG_ON(rl->lcn != LCN_HOLE);
		lcn = -1;
		rl2 = rl;
		while (--rl2 >= ni->runlist.rl) {
			if (rl2->lcn >= 0) {
				lcn = rl2->lcn + rl2->length;
				break;
			}
		}
		rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
				false);
		if (IS_ERR(rl2)) {
			err = PTR_ERR(rl2);
			ntfs_debug("Failed to allocate cluster, error code %i.",
					err);
			break;
		}
		lcn = rl2->lcn;
		rl = ntfs_runlists_merge(ni->runlist.rl, rl2);
		if (IS_ERR(rl)) {
			err = PTR_ERR(rl);
			if (err != -ENOMEM)
				err = -EIO;
			if (ntfs_cluster_free_from_rl(vol, rl2)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path.  Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			ntfs_free(rl2);
			break;
		}
		ni->runlist.rl = rl;
		status.runlist_merged = 1;
		ntfs_debug("Allocated cluster, lcn 0x%llx.",
				(unsigned long long)lcn);
		/* Map and lock the mft record and get the attribute record. */
		if (!NInoAttr(ni))
			base_ni = ni;
		else
			base_ni = ni->ext.base_ntfs_ino;
		m = map_mft_record(base_ni);
		if (IS_ERR(m)) {
			err = PTR_ERR(m);
			break;
		}
		ctx = ntfs_attr_get_search_ctx(base_ni, m);
		if (unlikely(!ctx)) {
			err = -ENOMEM;
			unmap_mft_record(base_ni);
			break;
		}
		status.mft_attr_mapped = 1;
		err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx);
		if (unlikely(err)) {
			if (err == -ENOENT)
				err = -EIO;
			break;
		}
		m = ctx->mrec;
		a = ctx->attr;
		/*
		 * Find the runlist element with which the attribute extent
		 * starts.  Note, we cannot use the _attr_ version because we
		 * have mapped the mft record.  That is ok because we know the
		 * runlist fragment must be mapped already to have ever gotten
		 * here, so we can just use the _rl_ version.
		 */
		vcn = sle64_to_cpu(a->data.non_resident.lowest_vcn);
		rl2 = ntfs_rl_find_vcn_nolock(rl, vcn);
		BUG_ON(!rl2);
		BUG_ON(!rl2->length);
		BUG_ON(rl2->lcn < LCN_HOLE);
		highest_vcn = sle64_to_cpu(a->data.non_resident.highest_vcn);
		/*
		 * If @highest_vcn is zero, calculate the real highest_vcn
		 * (which can really be zero).
		 */
		if (!highest_vcn)
			highest_vcn = (sle64_to_cpu(
					a->data.non_resident.allocated_size) >>
					vol->cluster_size_bits) - 1;
		/*
		 * Determine the size of the mapping pairs array for the new
		 * extent, i.e. the old extent with the hole filled.
		 */
		mp_size = ntfs_get_size_for_mapping_pairs(vol, rl2, vcn,
				highest_vcn);
		if (unlikely(mp_size <= 0)) {
			if (!(err = mp_size))
				err = -EIO;
			ntfs_debug("Failed to get size for mapping pairs "
					"array, error code %i.", err);
			break;
		}
		/*
		 * Resize the attribute record to fit the new mapping pairs
		 * array.
		 */
		attr_rec_len = le32_to_cpu(a->length);
		err = ntfs_attr_record_resize(m, a, mp_size + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset));
		if (unlikely(err)) {
			BUG_ON(err != -ENOSPC);
			// TODO: Deal with this by using the current attribute
			// and fill it with as much of the mapping pairs
			// array as possible.  Then loop over each attribute
			// extent rewriting the mapping pairs arrays as we go
			// along and if when we reach the end we have not
			// enough space, try to resize the last attribute
			// extent and if even that fails, add a new attribute
			// extent.
			// We could also try to resize at each step in the hope
			// that we will not need to rewrite every single extent.
			// Note, we may need to decompress some extents to fill
			// the runlist as we are walking the extents...
			ntfs_error(vol->sb, "Not enough space in the mft "
					"record for the extended attribute "
					"record.  This case is not "
					"implemented yet.");
			err = -EOPNOTSUPP;
			break;
		}
		status.mp_rebuilt = 1;
		/*
		 * Generate the mapping pairs array directly into the attribute
		 * record.
		 */
		err = ntfs_mapping_pairs_build(vol, (u8*)a + le16_to_cpu(
				a->data.non_resident.mapping_pairs_offset),
				mp_size, rl2, vcn, highest_vcn, NULL);
		if (unlikely(err)) {
			ntfs_error(vol->sb, "Cannot fill hole in inode 0x%lx, "
					"attribute type 0x%x, because building "
					"the mapping pairs failed with error "
					"code %i.", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			err = -EIO;
			break;
		}
		/* Update the highest_vcn but only if it was not set. */
		if (unlikely(!a->data.non_resident.highest_vcn))
			a->data.non_resident.highest_vcn =
					cpu_to_sle64(highest_vcn);
		/*
		 * If the attribute is sparse/compressed, update the compressed
		 * size in the ntfs_inode structure and the attribute record.
		 */
		if (likely(NInoSparse(ni) || NInoCompressed(ni))) {
			/*
			 * If we are not in the first attribute extent, switch
			 * to it, but first ensure the changes will make it to
			 * disk later.
			 */
			if (a->data.non_resident.lowest_vcn) {
				flush_dcache_mft_record_page(ctx->ntfs_ino);
				mark_mft_record_dirty(ctx->ntfs_ino);
				ntfs_attr_reinit_search_ctx(ctx);
				err = ntfs_attr_lookup(ni->type, ni->name,
						ni->name_len, CASE_SENSITIVE,
						0, NULL, 0, ctx);
				if (unlikely(err)) {
					status.attr_switched = 1;
					break;
				}
				/* @m is not used any more so do not set it. */
				a = ctx->attr;
			}
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			a->data.non_resident.compressed_size =
					cpu_to_sle64(ni->itype.compressed.size);
			write_unlock_irqrestore(&ni->size_lock, flags);
		}
		/* Ensure the changes make it to disk. */
		flush_dcache_mft_record_page(ctx->ntfs_ino);
		mark_mft_record_dirty(ctx->ntfs_ino);
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
		/* Successfully filled the hole. */
		status.runlist_merged = 0;
		status.mft_attr_mapped = 0;
		status.mp_rebuilt = 0;
		/* Setup the map cache and use that to deal with the buffer. */
		was_hole = true;
		vcn = bh_cpos;
		vcn_len = 1;
		lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
		cdelta = 0;
		/*
		 * If the number of remaining clusters in the @pages is smaller
		 * or equal to the number of cached clusters, unlock the
		 * runlist as the map cache will be used from now on.
		 */
		if (likely(vcn + vcn_len >= cend)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = false;
			rl = NULL;
		}
		goto map_buffer_cached;
	} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
	/* If there are no errors, do the next page. */
	if (likely(!err && ++u < nr_pages))
		goto do_next_page;
	/* If there are no errors, release the runlist lock if we took it. */
	if (likely(!err)) {
		if (unlikely(rl_write_locked)) {
			up_write(&ni->runlist.lock);
			rl_write_locked = false;
		} else if (unlikely(rl))
			up_read(&ni->runlist.lock);
		rl = NULL;
	}
	/* If we issued read requests, let them complete. */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	while (wait_bh > wait) {
		bh = *--wait_bh;
		wait_on_buffer(bh);
		if (likely(buffer_uptodate(bh))) {
			page = bh->b_page;
			bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) +
					bh_offset(bh);
			/*
			 * If the buffer overflows the initialized size, need
			 * to zero the overflowing region.
			 */
			if (unlikely(bh_pos + blocksize > initialized_size)) {
				int ofs = 0;

				if (likely(bh_pos < initialized_size))
					ofs = initialized_size - bh_pos;
				zero_user_segment(page, bh_offset(bh) + ofs,
						blocksize);
			}
		} else /* if (unlikely(!buffer_uptodate(bh))) */
			err = -EIO;
	}
	if (likely(!err)) {
		/* Clear buffer_new on all buffers. */
		u = 0;
		do {
			bh = head = page_buffers(pages[u]);
			do {
				if (buffer_new(bh))
					clear_buffer_new(bh);
			} while ((bh = bh->b_this_page) != head);
		} while (++u < nr_pages);
		ntfs_debug("Done.");
		return err;
	}
	if (status.attr_switched) {
		/* Get back to the attribute extent we modified. */
		ntfs_attr_reinit_search_ctx(ctx);
		if (ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
				CASE_SENSITIVE, bh_cpos, NULL, 0, ctx)) {
			ntfs_error(vol->sb, "Failed to find required "
					"attribute extent of attribute in "
					"error code path.  Run chkdsk to "
					"recover.");
			write_lock_irqsave(&ni->size_lock, flags);
			ni->itype.compressed.size += vol->cluster_size;
			write_unlock_irqrestore(&ni->size_lock, flags);
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
			/*
			 * The only thing that is now wrong is the compressed
			 * size of the base attribute extent which chkdsk
			 * should be able to fix.
			 */
			NVolSetErrors(vol);
		} else {
			m = ctx->mrec;
			a = ctx->attr;
			status.attr_switched = 0;
		}
	}
	/*
	 * If the runlist has been modified, need to restore it by punching a
	 * hole into it and we then need to deallocate the on-disk cluster as
	 * well.  Note, we only modify the runlist if we are able to generate a
	 * new mapping pairs array, i.e. only when the mapped attribute extent
	 * is not switched.
	 */
	if (status.runlist_merged && !status.attr_switched) {
		BUG_ON(!rl_write_locked);
		/* Make the file cluster we allocated sparse in the runlist. */
		if (ntfs_rl_punch_nolock(vol, &ni->runlist, bh_cpos, 1)) {
			ntfs_error(vol->sb, "Failed to punch hole into "
					"attribute runlist in error code "
					"path.  Run chkdsk to recover the "
					"lost cluster.");
			NVolSetErrors(vol);
		} else /* if (success) */ {
			status.runlist_merged = 0;
			/*
			 * Deallocate the on-disk cluster we allocated but only
			 * if we succeeded in punching its vcn out of the
			 * runlist.
			 */
			down_write(&vol->lcnbmp_lock);
			if (ntfs_bitmap_clear_bit(vol->lcnbmp_ino, lcn)) {
				ntfs_error(vol->sb, "Failed to release "
						"allocated cluster in error "
						"code path.  Run chkdsk to "
						"recover the lost cluster.");
				NVolSetErrors(vol);
			}
			up_write(&vol->lcnbmp_lock);
		}
	}
	/*
	 * Resize the attribute record to its old size and rebuild the mapping
	 * pairs array.  Note, we only can do this if the runlist has been
	 * restored to its old state which also implies that the mapped
	 * attribute extent is not switched.
	 */
	if (status.mp_rebuilt && !status.runlist_merged) {
		if (ntfs_attr_record_resize(m, a, attr_rec_len)) {
			ntfs_error(vol->sb, "Failed to restore attribute "
					"record in error code path.  Run "
					"chkdsk to recover.");
			NVolSetErrors(vol);
		} else /* if (success) */ {
			if (ntfs_mapping_pairs_build(vol, (u8*)a +
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), attr_rec_len -
					le16_to_cpu(a->data.non_resident.
					mapping_pairs_offset), ni->runlist.rl,
					vcn, highest_vcn, NULL)) {
				ntfs_error(vol->sb, "Failed to restore "
						"mapping pairs array in error "
						"code path.  Run chkdsk to "
						"recover.");
				NVolSetErrors(vol);
			}
			flush_dcache_mft_record_page(ctx->ntfs_ino);
			mark_mft_record_dirty(ctx->ntfs_ino);
		}
	}
	/* Release the mft record and the attribute. */
	if (status.mft_attr_mapped) {
		ntfs_attr_put_search_ctx(ctx);
		unmap_mft_record(base_ni);
	}
	/* Release the runlist lock. */
	if (rl_write_locked)
		up_write(&ni->runlist.lock);
	else if (rl)
		up_read(&ni->runlist.lock);
	/*
	 * Zero out any newly allocated blocks to avoid exposing stale data.
	 * If BH_New is set, we know that the block was newly allocated above
	 * and that it has not been fully zeroed and marked dirty yet.
	 */
	nr_pages = u;
	u = 0;
	end = bh_cpos << vol->cluster_size_bits;
	do {
		page = pages[u];
		bh = head = page_buffers(page);
		do {
			if (u == nr_pages &&
					((s64)page->index << PAGE_CACHE_SHIFT) +
					bh_offset(bh) >= end)
				break;
			if (!buffer_new(bh))
				continue;
			clear_buffer_new(bh);
			if (!buffer_uptodate(bh)) {
				if (PageUptodate(page))
					set_buffer_uptodate(bh);
				else {
					zero_user(page, bh_offset(bh),
							blocksize);
					set_buffer_uptodate(bh);
				}
			}
			mark_buffer_dirty(bh);
		} while ((bh = bh->b_this_page) != head);
	} while (++u <= nr_pages);
	ntfs_error(vol->sb, "Failed.  Returning error code %i.", err);
	return err;
}
static inline void ntfs_flush_dcache_pages(struct page **pages,
		unsigned nr_pages)
{
	BUG_ON(!nr_pages);
	/*
	 * Warning: Do not do the decrement at the same time as the call to
	 * flush_dcache_page() because it is a NULL macro on i386 and hence the
	 * decrement never happens so the loop never terminates.
	 */
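	/*
	 * I.e. (illustrative only) the tempting one-liner
	 *
	 *	flush_dcache_page(pages[--nr_pages]);
	 *
	 * would lose the decrement on architectures where the macro expands
	 * to nothing and discards its argument, so the decrement is kept as
	 * a separate statement below.
	 */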
	do {
		--nr_pages;
		flush_dcache_page(pages[nr_pages]);
	} while (nr_pages > 0);
}
/**
 * ntfs_commit_pages_after_non_resident_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * See description of ntfs_commit_pages_after_write(), below.
 */
static inline int ntfs_commit_pages_after_non_resident_write(
		struct page **pages, const unsigned nr_pages,
		s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct buffer_head *bh, *head;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	unsigned long flags;
	unsigned blocksize, u;
	int err;

	vi = pages[0]->mapping->host;
	ni = NTFS_I(vi);
	blocksize = vi->i_sb->s_blocksize;
	end = pos + bytes;
	u = 0;
	do {
		s64 bh_pos;
		struct page *page;
		bool partial;

		page = pages[u];
		bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
		bh = head = page_buffers(page);
		partial = false;
		do {
			s64 bh_end;

			bh_end = bh_pos + blocksize;
			if (bh_end <= pos || bh_pos >= end) {
				if (!buffer_uptodate(bh))
					partial = true;
			} else {
				set_buffer_uptodate(bh);
				mark_buffer_dirty(bh);
			}
		} while (bh_pos += blocksize, (bh = bh->b_this_page) != head);
		/*
		 * If all buffers are now uptodate but the page is not, set the
		 * page uptodate.
		 */
		if (!partial && !PageUptodate(page))
			SetPageUptodate(page);
	} while (++u < nr_pages);
	/*
	 * Finally, if we do not need to update initialized_size or i_size we
	 * are finished.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (end <= initialized_size) {
		ntfs_debug("Done.");
		return 0;
	}
	/*
	 * Update initialized_size/i_size as appropriate, both in the inode and
	 * the mft record.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	BUG_ON(!NInoNonResident(ni));
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(!a->non_resident);
	write_lock_irqsave(&ni->size_lock, flags);
	BUG_ON(end > ni->allocated_size);
	ni->initialized_size = end;
	a->data.non_resident.initialized_size = cpu_to_sle64(end);
	if (end > i_size_read(vi)) {
		i_size_write(vi, end);
		a->data.non_resident.data_size =
				a->data.non_resident.initialized_size;
	}
	write_unlock_irqrestore(&ni->size_lock, flags);
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
			"code %i).", err);
	if (err != -ENOMEM)
		NVolSetErrors(ni->vol);
	return err;
}
/**
 * ntfs_commit_pages_after_write - commit the received data
 * @pages:	array of destination pages
 * @nr_pages:	number of pages in @pages
 * @pos:	byte position in file at which the write begins
 * @bytes:	number of bytes to be written
 *
 * This is called from ntfs_file_buffered_write() with i_mutex held on the inode
 * (@pages[0]->mapping->host).  There are @nr_pages pages in @pages which are
 * locked but not kmap()ped.  The source data has already been copied into the
 * @page.  ntfs_prepare_pages_for_non_resident_write() has been called before
 * the data was copied (for non-resident attributes only) and it returned
 * success.
 *
 * Need to set uptodate and mark dirty all buffers within the boundary of the
 * write.  If all buffers in a page are uptodate we set the page uptodate, too.
 *
 * Setting the buffers dirty ensures that they get written out later when
 * ntfs_writepage() is invoked by the VM.
 *
 * Finally, we need to update i_size and initialized_size as appropriate both
 * in the inode and the mft record.
 *
 * This is modelled after fs/buffer.c::generic_commit_write(), which marks
 * buffers uptodate and dirty, sets the page uptodate if all buffers in the
 * page are uptodate, and updates i_size if the end of io is beyond i_size.  In
 * that case, it also marks the inode dirty.
 *
 * If things have gone as outlined in
 * ntfs_prepare_pages_for_non_resident_write(), we do not need to do any page
 * content modifications here for non-resident attributes.  For resident
 * attributes we need to do the uptodate bringing here which we combine with
 * the copying into the mft record which means we save one atomic kmap.
 *
 * Return 0 on success or -errno on error.
 */
static int ntfs_commit_pages_after_write(struct page **pages,
		const unsigned nr_pages, s64 pos, size_t bytes)
{
	s64 end, initialized_size;
	loff_t i_size;
	struct inode *vi;
	ntfs_inode *ni, *base_ni;
	struct page *page;
	ntfs_attr_search_ctx *ctx;
	MFT_RECORD *m;
	ATTR_RECORD *a;
	char *kattr, *kaddr;
	unsigned long flags;
	u32 attr_len;
	int err;

	BUG_ON(!nr_pages);
	BUG_ON(!pages);
	page = pages[0];
	BUG_ON(!page);
	vi = page->mapping->host;
	ni = NTFS_I(vi);
	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, start page "
			"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
			vi->i_ino, ni->type, page->index, nr_pages,
			(long long)pos, bytes);
	if (NInoNonResident(ni))
		return ntfs_commit_pages_after_non_resident_write(pages,
				nr_pages, pos, bytes);
	BUG_ON(nr_pages > 1);
	/*
	 * Attribute is resident, implying it is not compressed, encrypted, or
	 * sparse.
	 */
	if (!NInoAttr(ni))
		base_ni = ni;
	else
		base_ni = ni->ext.base_ntfs_ino;
	BUG_ON(NInoNonResident(ni));
	/* Map, pin, and lock the mft record. */
	m = map_mft_record(base_ni);
	if (IS_ERR(m)) {
		err = PTR_ERR(m);
		m = NULL;
		ctx = NULL;
		goto err_out;
	}
	ctx = ntfs_attr_get_search_ctx(base_ni, m);
	if (unlikely(!ctx)) {
		err = -ENOMEM;
		goto err_out;
	}
	err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
			CASE_SENSITIVE, 0, NULL, 0, ctx);
	if (unlikely(err)) {
		if (err == -ENOENT)
			err = -EIO;
		goto err_out;
	}
	a = ctx->attr;
	BUG_ON(a->non_resident);
	/* The total length of the attribute value. */
	attr_len = le32_to_cpu(a->data.resident.value_length);
	i_size = i_size_read(vi);
	BUG_ON(attr_len != i_size);
	BUG_ON(pos > attr_len);
	end = pos + bytes;
	BUG_ON(end > le32_to_cpu(a->length) -
			le16_to_cpu(a->data.resident.value_offset));
	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
	kaddr = kmap_atomic(page);
	/* Copy the received data from the page to the mft record. */
	memcpy(kattr + pos, kaddr + pos, bytes);
	/* Update the attribute length if necessary. */
	if (end > attr_len) {
		attr_len = end;
		a->data.resident.value_length = cpu_to_le32(attr_len);
	}
	/*
	 * If the page is not uptodate, bring the out of bounds area(s)
	 * uptodate by copying data from the mft record to the page.
	 */
	if (!PageUptodate(page)) {
		if (pos > 0)
			memcpy(kaddr, kattr, pos);
		if (end < attr_len)
			memcpy(kaddr + end, kattr + end, attr_len - end);
		/* Zero the region outside the end of the attribute value. */
		memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	kunmap_atomic(kaddr);
	/* Update initialized_size/i_size if necessary. */
	read_lock_irqsave(&ni->size_lock, flags);
	initialized_size = ni->initialized_size;
	BUG_ON(end > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	BUG_ON(initialized_size != i_size);
	if (end > initialized_size) {
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = end;
		i_size_write(vi, end);
		write_unlock_irqrestore(&ni->size_lock, flags);
	}
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (err == -ENOMEM) {
		ntfs_warning(vi->i_sb, "Error allocating memory required to "
				"commit the write.");
		if (PageUptodate(page)) {
			ntfs_warning(vi->i_sb, "Page is uptodate, setting "
					"dirty so the write will be retried "
					"later on by the VM.");
			/*
			 * Put the page on mapping->dirty_pages, but leave its
			 * buffers' dirty state as-is.
			 */
			__set_page_dirty_nobuffers(page);
			err = 0;
		} else
			ntfs_error(vi->i_sb, "Page is not uptodate.  Written "
					"data has been lost.");
	} else {
		ntfs_error(vi->i_sb, "Resident attribute commit write failed "
				"with error %i.", err);
		NVolSetErrors(ni->vol);
	}
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
}
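
/*
 * Illustrative (hedged) call sequence from ntfs_perform_write() below:
 * the user data is copied into the locked pages first and only then
 * committed:
 *
 *	copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs,
 *			i, bytes);
 *	ntfs_flush_dcache_pages(pages + u, do_pages - u);
 *	if (likely(copied == bytes))
 *		status = ntfs_commit_pages_after_write(pages, do_pages,
 *				pos, bytes);
 */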
/*
 * Copy as much as we can into the pages and return the number of bytes which
 * were successfully copied.  If a fault is encountered then clear the pages
 * out to (ofs + bytes) and return the number of bytes which were copied.
 */
static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages,
		unsigned ofs, struct iov_iter *i, size_t bytes)
{
	struct page **last_page = pages + nr_pages;
	size_t total = 0;
	struct iov_iter data = *i;
	unsigned len, copied;

	do {
		len = PAGE_CACHE_SIZE - ofs;
		if (len > bytes)
			len = bytes;
		copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs,
				len);
		total += copied;
		bytes -= copied;
		if (!bytes)
			break;
		iov_iter_advance(&data, copied);
		if (copied < len)
			goto err;
		ofs = 0;
	} while (++pages < last_page);
out:
	return total;
err:
	/* Zero the rest of the target like __copy_from_user(). */
	len = PAGE_CACHE_SIZE - copied;
	do {
		if (len > bytes)
			len = bytes;
		zero_user(*pages, copied, len);
		bytes -= len;
		copied = 0;
		len = PAGE_CACHE_SIZE;
	} while (++pages < last_page);
	goto out;
}
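
/*
 * Note (illustrative, hedged): if the copy above comes up short because the
 * source pages faulted, ntfs_perform_write() below retries with at most one
 * iovec segment's worth of data to avoid livelocking:
 *
 *	if (unlikely(!copied)) {
 *		size_t sc = iov_iter_single_seg_count(i);
 *		if (bytes > sc)
 *			bytes = sc;
 *		goto again;
 *	}
 */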
/**
 * ntfs_perform_write - perform buffered write to a file
 * @file:	file to write to
 * @i:		iov_iter with data to write
 * @pos:	byte offset in file at which to begin writing to
 */
static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i,
		loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *vi = mapping->host;
	ntfs_inode *ni = NTFS_I(vi);
	ntfs_volume *vol = ni->vol;
	struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
	struct page *cached_page = NULL;
	VCN last_vcn;
	LCN lcn;
	size_t bytes;
	ssize_t status, written = 0;
	unsigned nr_pages;

	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, pos "
			"0x%llx, count 0x%lx.", vi->i_ino,
			(unsigned)le32_to_cpu(ni->type),
			(unsigned long long)pos,
			(unsigned long)iov_iter_count(i));
	/*
	 * If a previous ntfs_truncate() failed, repeat it and abort if it
	 * fails again.
	 */
	if (unlikely(NInoTruncateFailed(ni))) {
		int err;

		inode_dio_wait(vi);
		err = ntfs_truncate(vi);
		if (err || NInoTruncateFailed(ni)) {
			if (!err)
				err = -EIO;
			ntfs_error(vol->sb, "Cannot perform write to inode "
					"0x%lx, attribute type 0x%x, because "
					"ntfs_truncate() failed (error code "
					"%i).", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			return err;
		}
	}
	/*
	 * Determine the number of pages per cluster for non-resident
	 * attributes.
	 */
	nr_pages = 1;
	if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni))
		nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT;
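	/*
	 * Example (illustrative): with 4 KiB pages and a 64 KiB cluster,
	 * nr_pages is 16, so a write into a sparse region locks down all 16
	 * pages of the cluster that has to be allocated in one go.
	 */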
	last_vcn = -1;
	do {
		VCN vcn;
		pgoff_t idx, start_idx;
		unsigned ofs, do_pages, u;
		size_t copied;

		start_idx = idx = pos >> PAGE_CACHE_SHIFT;
		ofs = pos & ~PAGE_CACHE_MASK;
		bytes = PAGE_CACHE_SIZE - ofs;
		do_pages = 1;
		if (nr_pages > 1) {
			vcn = pos >> vol->cluster_size_bits;
			if (vcn != last_vcn) {
				last_vcn = vcn;
				/*
				 * Get the lcn of the vcn the write is in.  If
				 * it is a hole, need to lock down all pages in
				 * the cluster.
				 */
				down_read(&ni->runlist.lock);
				lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
						vol->cluster_size_bits, false);
				up_read(&ni->runlist.lock);
				if (unlikely(lcn < LCN_HOLE)) {
					if (lcn == LCN_ENOMEM)
						status = -ENOMEM;
					else {
						status = -EIO;
						ntfs_error(vol->sb, "Cannot "
							"perform write to "
							"inode 0x%lx, "
							"attribute type 0x%x, "
							"because the attribute "
							"is corrupt.",
							vi->i_ino, (unsigned)
							le32_to_cpu(ni->type));
					}
					break;
				}
				if (lcn == LCN_HOLE) {
					start_idx = (pos & ~(s64)
							vol->cluster_size_mask)
							>> PAGE_CACHE_SHIFT;
					bytes = vol->cluster_size - (pos &
							vol->cluster_size_mask);
					do_pages = nr_pages;
				}
			}
		}
		if (bytes > iov_iter_count(i))
			bytes = iov_iter_count(i);
again:
		/*
		 * Bring in the user page(s) that we will copy from _first_.
		 * Otherwise there is a nasty deadlock on copying from the same
		 * page(s) as we are writing to, without it/them being marked
		 * up-to-date.  Note, at present there is nothing to stop the
		 * pages being swapped out between us bringing them into memory
		 * and doing the actual copying.
		 */
		if (unlikely(iov_iter_fault_in_multipages_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}
		/* Get and lock @do_pages starting at index @start_idx. */
		status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
				pages, &cached_page);
		if (unlikely(status))
			break;
		/*
		 * For non-resident attributes, we need to fill any holes with
		 * actual clusters and ensure all buffers are mapped.  We also
		 * need to bring uptodate any buffers that are only partially
		 * being written to.
		 */
		if (NInoNonResident(ni)) {
			status = ntfs_prepare_pages_for_non_resident_write(
					pages, do_pages, pos, bytes);
			if (unlikely(status)) {
				do {
					unlock_page(pages[--do_pages]);
					page_cache_release(pages[do_pages]);
				} while (do_pages);
				break;
			}
		}
		u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
		copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs,
				i, bytes);
		ntfs_flush_dcache_pages(pages + u, do_pages - u);
		status = 0;
		if (likely(copied == bytes)) {
			status = ntfs_commit_pages_after_write(pages, do_pages,
					pos, bytes);
			if (!status)
				status = bytes;
		}
		do {
			unlock_page(pages[--do_pages]);
			page_cache_release(pages[do_pages]);
		} while (do_pages);
		if (unlikely(status < 0))
			break;
		copied = status;
		cond_resched();
		if (unlikely(!copied)) {
			size_t sc;

			/*
			 * We failed to copy anything.  Fall back to single
			 * segment length write.
			 *
			 * This is needed to avoid possible livelock in the
			 * case that all segments in the iov cannot be copied
			 * at once without a pagefault.
			 */
			sc = iov_iter_single_seg_count(i);
			if (bytes > sc)
				bytes = sc;
			goto again;
		}
		iov_iter_advance(i, copied);
		pos += copied;
		written += copied;
		balance_dirty_pages_ratelimited(mapping);
		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}
	} while (iov_iter_count(i));
	if (cached_page)
		page_cache_release(cached_page);
	ntfs_debug("Done.  Returning %s (written 0x%lx, status %li).",
			written ? "written" : "status", (unsigned long)written,
			(long)status);
	return written ? written : status;
}
/**
 * ntfs_file_write_iter_nolock - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @from:	iov_iter with data to write
 *
 * Basically the same as __generic_file_write_iter() except that it ends
 * up calling ntfs_perform_write() instead of generic_perform_write() and that
 * O_DIRECT is not implemented.
 */
static ssize_t ntfs_file_write_iter_nolock(struct kiocb *iocb,
		struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	ssize_t written = 0;
	ssize_t err;
	size_t count = iov_iter_count(from);

	err = ntfs_prepare_file_for_write(file, &pos, &count);
	if (count && !err) {
		iov_iter_truncate(from, count);
		written = ntfs_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
	}
	current->backing_dev_info = NULL;
	return written ? written : err;
}
/**
 * ntfs_file_write_iter - simple wrapper for ntfs_file_write_iter_nolock()
 * @iocb:	IO state structure
 * @from:	iov_iter with data to write
 *
 * Basically the same as generic_file_write_iter() except that it ends up
 * calling ntfs_file_write_iter_nolock() instead of
 * __generic_file_write_iter().
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *vi = file_inode(file);
	ssize_t ret;

	mutex_lock(&vi->i_mutex);
	ret = ntfs_file_write_iter_nolock(iocb, from);
	mutex_unlock(&vi->i_mutex);
	if (ret > 0) {
		ssize_t err;

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
/**
 * ntfs_file_fsync - sync a file to disk
 * @filp:	file to be synced
 * @datasync:	if non-zero only flush user data and not metadata
 *
 * Data integrity sync of a file to disk.  Used for fsync, fdatasync, and msync
 * system calls.  This function is inspired by fs/buffer.c::file_fsync().
 *
 * If @datasync is false, write the mft record and all associated extent mft
 * records as well as the $DATA attribute and then sync the block device.
 *
 * If @datasync is true and the attribute is non-resident, we skip the writing
 * of the mft record and all associated extent mft records (this might still
 * happen due to the write_inode_now() call).
 *
 * Also, if @datasync is true, we do not wait on the inode to be written out
 * but we always wait on the page cache pages to be written out.
 *
 * Locking: Caller must hold i_mutex on the inode.
 *
 * TODO: We should probably also write all attribute/index inodes associated
 * with this inode but since we have no simple way of getting to them we ignore
 * this problem for now.
 */
static int ntfs_file_fsync(struct file *filp, loff_t start, loff_t end,
			   int datasync)
{
	struct inode *vi = filp->f_mapping->host;
	int err, ret = 0;

	ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);

	err = filemap_write_and_wait_range(vi->i_mapping, start, end);
	if (err)
		return err;
	mutex_lock(&vi->i_mutex);

	BUG_ON(S_ISDIR(vi->i_mode));
	if (!datasync || !NInoNonResident(NTFS_I(vi)))
		ret = __ntfs_write_inode(vi, 1);
	write_inode_now(vi, !datasync);
	/*
	 * NOTE: If we were to use mapping->private_list (see ext2 and
	 * fs/buffer.c) for dirty blocks then we could optimize the below to be
	 * sync_mapping_buffers(vi->i_mapping).
	 */
	err = sync_blockdev(vi->i_sb->s_bdev);
	if (unlikely(err && !ret))
		ret = err;
	if (likely(!ret))
		ntfs_debug("Done.");
	else
		ntfs_warning(vi->i_sb, "Failed to f%ssync inode 0x%lx.  Error "
				"%u.", datasync ? "data" : "", vi->i_ino, -ret);
	mutex_unlock(&vi->i_mutex);
	return ret;
}
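
/*
 * Illustrative (hedged) userspace view: both calls below reach
 * ntfs_file_fsync() via the VFS sync path; fdatasync() passes datasync=1,
 * so for non-resident attributes the explicit mft record write above is
 * skipped as described in the comment.
 *
 *	fsync(fd);	// datasync == 0: data and metadata
 *	fdatasync(fd);	// datasync == 1: data (plus minimal metadata)
 */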
#endif /* NTFS_RW */
const struct file_operations ntfs_file_ops = {
	.llseek		= generic_file_llseek,
	.read		= new_sync_read,
	.read_iter	= generic_file_read_iter,
#ifdef NTFS_RW
	.write		= new_sync_write,
	.write_iter	= ntfs_file_write_iter,
	.fsync		= ntfs_file_fsync,
#endif /* NTFS_RW */
	.mmap		= generic_file_mmap,
	.open		= ntfs_file_open,
	.splice_read	= generic_file_splice_read,
};

const struct inode_operations ntfs_file_inode_ops = {
#ifdef NTFS_RW
	.setattr	= ntfs_setattr,
#endif /* NTFS_RW */
};

const struct file_operations ntfs_empty_file_ops = {};

const struct inode_operations ntfs_empty_inode_ops = {};