/*
 *  linux/fs/buffer.c
 *
 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
 */

/*
 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
 *
 * Removed a lot of unnecessary code and simplified things now that
 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
 *
 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
 *
 * Added 32k buffer block sizes - these are required on older ARM systems. - RMK
 *
 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
 */

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/blkdev.h>
#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/bio.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
#include <trace/events/block.h>

static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
static int submit_bh_wbc(int rw, struct buffer_head *bh,
			 unsigned long bio_flags,
			 struct writeback_control *wbc);

#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)

void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
	bh->b_end_io = handler;
	bh->b_private = private;
}
EXPORT_SYMBOL(init_buffer);

inline void touch_buffer(struct buffer_head *bh)
{
	trace_block_touch_buffer(bh);
	mark_page_accessed(bh->b_page);
}
EXPORT_SYMBOL(touch_buffer);

void __lock_buffer(struct buffer_head *bh)
{
	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);

void unlock_buffer(struct buffer_head *bh)
{
	clear_bit_unlock(BH_Lock, &bh->b_state);
	smp_mb__after_atomic();
	wake_up_bit(&bh->b_state, BH_Lock);
}
EXPORT_SYMBOL(unlock_buffer);

/*
 * Returns whether the page has dirty or writeback buffers.  If all the
 * buffers are unlocked and clean then the PageDirty information is stale.
 * If any of the buffers are locked, it is assumed they are locked for IO.
 */
void buffer_check_dirty_writeback(struct page *page,
				     bool *dirty, bool *writeback)
{
	struct buffer_head *head, *bh;
	*dirty = false;
	*writeback = false;

	BUG_ON(!PageLocked(page));

	if (!page_has_buffers(page))
		return;

	if (PageWriteback(page))
		*writeback = true;

	head = page_buffers(page);
	bh = head;
	do {
		if (buffer_locked(bh))
			*writeback = true;

		if (buffer_dirty(bh))
			*dirty = true;

		bh = bh->b_this_page;
	} while (bh != head);
}
EXPORT_SYMBOL(buffer_check_dirty_writeback);

/*
 * Block until a buffer comes unlocked.  This doesn't stop it
 * from becoming locked again - you have to lock it yourself
 * if you want to preserve its state.
 */
void __wait_on_buffer(struct buffer_head * bh)
{
	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__wait_on_buffer);
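
/*
 * For example, a caller that needs the buffer's state to stay stable while
 * it examines the data should take the lock itself rather than merely
 * wait (illustrative sketch):
 *
 *	lock_buffer(bh);
 *	if (buffer_uptodate(bh))
 *		...examine bh->b_data...
 *	unlock_buffer(bh);
 */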

static void
__clear_page_buffers(struct page *page)
{
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page_cache_release(page);
}

static void buffer_io_error(struct buffer_head *bh, char *msg)
{
	char b[BDEVNAME_SIZE];

	if (!test_bit(BH_Quiet, &bh->b_state))
		printk_ratelimited(KERN_ERR
			"Buffer I/O error on dev %s, logical block %llu%s\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr, msg);
}

/*
 * End-of-IO handler helper function which does not touch the bh after
 * unlocking it.
 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
 * a race there is benign: unlock_buffer() only uses the bh's address for
 * hashing after unlocking the buffer, so it doesn't actually touch the bh
 * itself.
 */
static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		/* This happens due to failed read-ahead (READA) attempts. */
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
}

/*
 * Default synchronous end-of-IO handler.  Just mark it up-to-date and
 * unlock the buffer.  This is what ll_rw_block uses too.
 */
void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
{
	__end_buffer_read_notouch(bh, uptodate);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_read_sync);

void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost sync page write");
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}
EXPORT_SYMBOL(end_buffer_write_sync);

/*
 * Various filesystems appear to want __find_get_block to be non-blocking.
 * But it's the page lock which protects the buffers.  To get around this,
 * we get exclusion from try_to_free_buffers with the blockdev mapping's
 * private_lock.
 *
 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
 * may be quite high.  This code could TryLock the page, and if that
 * succeeds, there is no need to take private_lock. (But if
 * private_lock is contended then so is mapping->tree_lock).
 */
static struct buffer_head *
__find_get_block_slow(struct block_device *bdev, sector_t block)
{
	struct inode *bd_inode = bdev->bd_inode;
	struct address_space *bd_mapping = bd_inode->i_mapping;
	struct buffer_head *ret = NULL;
	pgoff_t index;
	struct buffer_head *bh;
	struct buffer_head *head;
	struct page *page;
	int all_mapped = 1;

	index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
	page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
	if (!page)
		goto out;

	spin_lock(&bd_mapping->private_lock);
	if (!page_has_buffers(page))
		goto out_unlock;
	head = page_buffers(page);
	bh = head;
	do {
		if (!buffer_mapped(bh))
			all_mapped = 0;
		else if (bh->b_blocknr == block) {
			ret = bh;
			get_bh(bh);
			goto out_unlock;
		}
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * We might be here because some of the buffers on this page are
	 * not mapped.  This is due to various races between file I/O on
	 * the block device and getblk.  It gets dealt with elsewhere;
	 * don't report an error if we had some unmapped buffers.
	 */
	if (all_mapped) {
		char b[BDEVNAME_SIZE];

		printk("__find_get_block_slow() failed. "
			"block=%llu, b_blocknr=%llu\n",
			(unsigned long long)block,
			(unsigned long long)bh->b_blocknr);
		printk("b_state=0x%08lx, b_size=%zu\n",
			bh->b_state, bh->b_size);
		printk("device %s blocksize: %d\n", bdevname(bdev, b),
			1 << bd_inode->i_blkbits);
	}
out_unlock:
	spin_unlock(&bd_mapping->private_lock);
	page_cache_release(page);
out:
	return ret;
}

/*
 * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
 */
static void free_more_memory(void)
{
	struct zone *zone;
	int nid;

	wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
	yield();

	for_each_online_node(nid) {
		(void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
						gfp_zone(GFP_NOFS), NULL,
						&zone);
		if (zone)
			try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
						GFP_NOFS, NULL);
	}
}

/*
 * I/O completion handler for block_read_full_page() - pages
 * which come unlocked at the end of I/O.
 */
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;
	int page_uptodate = 1;

	BUG_ON(!buffer_async_read(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		clear_buffer_uptodate(bh);
		buffer_io_error(bh, ", async page read");
		SetPageError(page);
	}

	/*
	 * Be _very_ careful from here on. Bad things can happen if
	 * two buffer heads end IO at almost the same time and both
	 * decide that the page is now completely done.
	 */
	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
	clear_buffer_async_read(bh);
	unlock_buffer(bh);
	tmp = bh;
	do {
		if (!buffer_uptodate(tmp))
			page_uptodate = 0;
		if (buffer_async_read(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	} while (tmp != bh);
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);

	/*
	 * If none of the buffers had errors and they are all
	 * uptodate then we can set the page uptodate.
	 */
	if (page_uptodate && !PageError(page))
		SetPageUptodate(page);
	unlock_page(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}


/*
 * Completion handler for block_write_full_page() - pages which are unlocked
 * during I/O, and which have PageWriteback cleared upon I/O completion.
 */
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
	unsigned long flags;
	struct buffer_head *first;
	struct buffer_head *tmp;
	struct page *page;

	BUG_ON(!buffer_async_write(bh));

	page = bh->b_page;
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		buffer_io_error(bh, ", lost async page write");
		set_bit(AS_EIO, &page->mapping->flags);
		set_buffer_write_io_error(bh);
		clear_buffer_uptodate(bh);
		SetPageError(page);
	}

	first = page_buffers(page);
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);

	clear_buffer_async_write(bh);
	unlock_buffer(bh);
	tmp = bh->b_this_page;
	while (tmp != bh) {
		if (buffer_async_write(tmp)) {
			BUG_ON(!buffer_locked(tmp));
			goto still_busy;
		}
		tmp = tmp->b_this_page;
	}
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	end_page_writeback(page);
	return;

still_busy:
	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
	local_irq_restore(flags);
	return;
}
EXPORT_SYMBOL(end_buffer_async_write);


/*
 * If a page's buffers are under async read-in (end_buffer_async_read
 * completion) then there is a possibility that another thread of
 * control could lock one of the buffers after it has completed
 * but while some of the other buffers have not completed.  This
 * locked buffer would confuse end_buffer_async_read() into not unlocking
 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
 * that this buffer is not under async I/O.
 *
 * The page comes unlocked when it has no locked buffer_async buffers
 * left.
 *
 * PageLocked prevents anyone from starting new async I/O reads against
 * any of the buffers.
 *
 * PageWriteback is used to prevent simultaneous writeout of the same
 * page.
 *
 * PageLocked prevents anyone from starting writeback of a page which is
 * under read I/O (PageWriteback is only ever set against a locked page).
 */
static void mark_buffer_async_read(struct buffer_head *bh)
{
	bh->b_end_io = end_buffer_async_read;
	set_buffer_async_read(bh);
}

static void mark_buffer_async_write_endio(struct buffer_head *bh,
					  bh_end_io_t *handler)
{
	bh->b_end_io = handler;
	set_buffer_async_write(bh);
}

void mark_buffer_async_write(struct buffer_head *bh)
{
	mark_buffer_async_write_endio(bh, end_buffer_async_write);
}
EXPORT_SYMBOL(mark_buffer_async_write);


/*
 * fs/buffer.c contains helper functions for buffer-backed address space's
 * fsync functions.  A common requirement for buffer-based filesystems is
 * that certain data from the backing blockdev needs to be written out for
 * a successful fsync().  For example, ext2 indirect blocks need to be
 * written back and waited upon before fsync() returns.
 *
 * The functions mark_buffer_dirty_inode(), sync_mapping_buffers(),
 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
 * management of a list of dependent buffers at ->i_mapping->private_list.
 *
 * Locking is a little subtle: try_to_free_buffers() will remove buffers
 * from their controlling inode's queue when they are being freed.  But
 * try_to_free_buffers() will be operating against the *blockdev* mapping
 * at the time, not against the S_ISREG file which depends on those buffers.
 * So the locking for private_list is via the private_lock in the address_space
 * which backs the buffers.  Which is different from the address_space
 * against which the buffers are listed.  So for a particular address_space,
 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
 * mapping->private_list will always be protected by the backing blockdev's
 * ->private_lock.
 *
 * Which introduces a requirement: all buffers on an address_space's
 * ->private_list must be from the same address_space: the blockdev's.
 *
 * address_spaces which do not place buffers at ->private_list via these
 * utility functions are free to use private_lock and private_list for
 * whatever they want.  The only requirement is that list_empty(private_list)
 * be true at clear_inode() time.
 *
 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
 * filesystems should do that.  invalidate_inode_buffers() should just go
 * BUG_ON(!list_empty).
 *
 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
 * take an address_space, not an inode.  And it should be called
 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
 * queued up.
 *
 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
 * list if it is already on a list.  Because if the buffer is on a list,
 * it *must* already be on the right one.  If not, the filesystem is being
 * silly.  This will save a ton of locking.  But first we have to ensure
 * that buffers are taken *off* the old inode's list when they are freed
 * (presumably in truncate).  That requires careful auditing of all
 * filesystems (do it inside bforget()).  It could also be done by bringing
 * b_inode back.
 */

/*
 * The buffer's backing address_space's private_lock must be held
 */
static void __remove_assoc_queue(struct buffer_head *bh)
{
	list_del_init(&bh->b_assoc_buffers);
	WARN_ON(!bh->b_assoc_map);
	if (buffer_write_io_error(bh))
		set_bit(AS_EIO, &bh->b_assoc_map->flags);
	bh->b_assoc_map = NULL;
}

int inode_has_buffers(struct inode *inode)
{
	return !list_empty(&inode->i_data.private_list);
}

/*
 * osync is designed to support O_SYNC I/O.  It waits synchronously for
 * all already-submitted IO to complete, but does not queue any new
 * writes to the disk.
 *
 * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
 * you dirty the buffers, and then use osync_buffers_list to wait for
 * completion.  Any other dirty buffers which are not yet queued for
 * write will not be flushed to disk by the osync.
 */
static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head *p;
	int err = 0;

	spin_lock(lock);
repeat:
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		if (buffer_locked(bh)) {
			get_bh(bh);
			spin_unlock(lock);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				err = -EIO;
			brelse(bh);
			spin_lock(lock);
			goto repeat;
		}
	}
	spin_unlock(lock);
	return err;
}
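
/*
 * For example, the O_SYNC-style sequence described above, over an
 * already-dirtied buffer, could look like this (illustrative sketch;
 * @lock and @list stand for the blockdev mapping's private_lock and the
 * inode's private_list):
 *
 *	ll_rw_block(WRITE, 1, &bh);
 *	err = osync_buffers_list(lock, list);
 */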

static void do_thaw_one(struct super_block *sb, void *unused)
{
	char b[BDEVNAME_SIZE];
	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
		printk(KERN_WARNING "Emergency Thaw on %s\n",
		       bdevname(sb->s_bdev, b));
}

static void do_thaw_all(struct work_struct *work)
{
	iterate_supers(do_thaw_one, NULL);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

/**
 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
 * that I/O.
 *
 * Basically, this is a convenience function for fsync().
 * @mapping is a file or directory which needs those buffers to be written for
 * a successful fsync().
 */
int sync_mapping_buffers(struct address_space *mapping)
{
	struct address_space *buffer_mapping = mapping->private_data;

	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
		return 0;

	return fsync_buffers_list(&buffer_mapping->private_lock,
					&mapping->private_list);
}
EXPORT_SYMBOL(sync_mapping_buffers);
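
/*
 * Typical use from a filesystem's ->fsync() method (illustrative sketch;
 * "myfs_fsync" is a hypothetical name):
 *
 *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *			      int datasync)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *
 *		return sync_mapping_buffers(inode->i_mapping);
 *	}
 */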

/*
 * Called when we've recently written block `bblock', and it is known that
 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
 */
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
	if (bh) {
		if (buffer_dirty(bh))
			ll_rw_block(WRITE, 1, &bh);
		put_bh(bh);
	}
}

void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct address_space *buffer_mapping = bh->b_page->mapping;

	mark_buffer_dirty(bh);
	if (!mapping->private_data) {
		mapping->private_data = buffer_mapping;
	} else {
		BUG_ON(mapping->private_data != buffer_mapping);
	}
	if (!bh->b_assoc_map) {
		spin_lock(&buffer_mapping->private_lock);
		list_move_tail(&bh->b_assoc_buffers,
				&mapping->private_list);
		bh->b_assoc_map = mapping;
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty_inode);
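
/*
 * For example, after modifying an indirect block on behalf of @inode, a
 * filesystem can associate it for later fsync (sketch; "offset", "data"
 * and "len" are assumed from the caller's context):
 *
 *	memcpy(bh->b_data + offset, data, len);
 *	mark_buffer_dirty_inode(bh, inode);
 *
 * A subsequent sync_mapping_buffers(inode->i_mapping) will then write the
 * buffer out and wait on it.
 */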

/*
 * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
 * dirty.
 *
 * If warn is true, then emit a warning if the page is not uptodate and has
 * not been truncated.
 *
 * The caller must hold the mem_cgroup_begin_page_stat() lock.
 */
static void __set_page_dirty(struct page *page, struct address_space *mapping,
			     struct mem_cgroup *memcg, int warn)
{
	unsigned long flags;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	if (page->mapping) {	/* Race with truncate? */
		WARN_ON_ONCE(warn && !PageUptodate(page));
		account_page_dirtied(page, mapping, memcg);
		radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
	}
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
}

/*
 * Add a page to the dirty page list.
 *
 * It is a sad fact of life that this function is called from several places
 * deeply under spinlocking.  It may not sleep.
 *
 * If the page has buffers, the uptodate buffers are set dirty, to preserve
 * dirty-state coherency between the page and the buffers.  If the page does
 * not have buffers then when they are later attached they will all be set
 * dirty.
 *
 * The buffers are dirtied before the page is dirtied.  There's a small race
 * window in which a writepage caller may see the page cleanness but not the
 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
 * before the buffers, a concurrent writepage caller could clear the page dirty
 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
 * page on the dirty page list.
 *
 * We use private_lock to lock against try_to_free_buffers while using the
 * page's buffer list.  Also use this to protect against clean buffers being
 * added to the page after it was set dirty.
 *
 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
 * address_space though.
 */
int __set_page_dirty_buffers(struct page *page)
{
	int newly_dirty;
	struct mem_cgroup *memcg;
	struct address_space *mapping = page_mapping(page);

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			set_buffer_dirty(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	/*
	 * Use mem_cgroup_begin_page_stat() to keep PageDirty synchronized
	 * with per-memcg dirty page counters.
	 */
	memcg = mem_cgroup_begin_page_stat(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty)
		__set_page_dirty(page, mapping, memcg, 1);

	mem_cgroup_end_page_stat(memcg);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	return newly_dirty;
}
EXPORT_SYMBOL(__set_page_dirty_buffers);

/*
 * Write out and wait upon a list of buffers.
 *
 * We have conflicting pressures: we want to make sure that all
 * initially dirty buffers get waited on, but that any subsequently
 * dirtied buffers don't.  After all, we don't want fsync to last
 * forever if somebody is actively writing to the file.
 *
 * Do this in two main stages: first we copy dirty buffers to a
 * temporary inode list, queueing the writes as we go.  Then we clean
 * up, waiting for those writes to complete.
 *
 * During this second stage, any subsequent updates to the file may end
 * up refiling the buffer on the original inode's dirty list again, so
 * there is a chance we will end up with a buffer queued for write but
 * not yet completed on that list.  So, as a final cleanup we go through
 * the osync code to catch these locked, dirty buffers without requeuing
 * any newly dirty buffers for write.
 */
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
{
	struct buffer_head *bh;
	struct list_head tmp;
	struct address_space *mapping;
	int err = 0, err2;
	struct blk_plug plug;

	INIT_LIST_HEAD(&tmp);
	blk_start_plug(&plug);

	spin_lock(lock);
	while (!list_empty(list)) {
		bh = BH_ENTRY(list->next);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh) || buffer_locked(bh)) {
			list_add(&bh->b_assoc_buffers, &tmp);
			bh->b_assoc_map = mapping;
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(lock);
				/*
				 * Ensure any pending I/O completes so that
				 * write_dirty_buffer() actually writes the
				 * current contents - it is a noop if I/O is
				 * still in flight on potentially older
				 * contents.
				 */
				write_dirty_buffer(bh, WRITE_SYNC);

				/*
				 * Kick off IO for the previous mapping. Note
				 * that we will not run the very last mapping,
				 * wait_on_buffer() will do that for us
				 * through sync_buffer().
				 */
				brelse(bh);
				spin_lock(lock);
			}
		}
	}

	spin_unlock(lock);
	blk_finish_plug(&plug);
	spin_lock(lock);

	while (!list_empty(&tmp)) {
		bh = BH_ENTRY(tmp.prev);
		get_bh(bh);
		mapping = bh->b_assoc_map;
		__remove_assoc_queue(bh);
		/* Avoid race with mark_buffer_dirty_inode() which does
		 * a lockless check and we rely on seeing the dirty bit */
		smp_mb();
		if (buffer_dirty(bh)) {
			list_add(&bh->b_assoc_buffers,
				 &mapping->private_list);
			bh->b_assoc_map = mapping;
		}
		spin_unlock(lock);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			err = -EIO;
		brelse(bh);
		spin_lock(lock);
	}

	spin_unlock(lock);
	err2 = osync_buffers_list(lock, list);
	if (err)
		return err;
	else
		return err2;
}

/*
 * Invalidate any and all dirty buffers on a given inode.  We are
 * probably unmounting the fs, but that doesn't mean we have already
 * done a sync().  Just drop the buffers from the inode list.
 *
 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
 * assumes that all the buffers are against the blockdev.  Not true
 * for reiserfs.
 */
void invalidate_inode_buffers(struct inode *inode)
{
	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list))
			__remove_assoc_queue(BH_ENTRY(list->next));
		spin_unlock(&buffer_mapping->private_lock);
	}
}
EXPORT_SYMBOL(invalidate_inode_buffers);

/*
 * Remove any clean buffers from the inode's buffer list.  This is called
 * when we're trying to free the inode itself.  Those buffers can pin it.
 *
 * Returns true if all buffers were removed.
 */
int remove_inode_buffers(struct inode *inode)
{
	int ret = 1;

	if (inode_has_buffers(inode)) {
		struct address_space *mapping = &inode->i_data;
		struct list_head *list = &mapping->private_list;
		struct address_space *buffer_mapping = mapping->private_data;

		spin_lock(&buffer_mapping->private_lock);
		while (!list_empty(list)) {
			struct buffer_head *bh = BH_ENTRY(list->next);
			if (buffer_dirty(bh)) {
				ret = 0;
				break;
			}
			__remove_assoc_queue(bh);
		}
		spin_unlock(&buffer_mapping->private_lock);
	}
	return ret;
}

/*
 * Create the appropriate buffers when given a page for a data area and
 * the size of each buffer.  Use the bh->b_this_page linked list to
 * follow the buffers created.  Return NULL if unable to create more
 * buffers.
 *
 * The retry flag is used to differentiate async IO (paging, swapping),
 * which may not fail, from ordinary buffer allocations.
 */
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry)
{
	struct buffer_head *bh, *head;
	long offset;

try_again:
	head = NULL;
	offset = PAGE_SIZE;
	while ((offset -= size) >= 0) {
		bh = alloc_buffer_head(GFP_NOFS);
		if (!bh)
			goto no_grow;

		bh->b_this_page = head;
		bh->b_blocknr = -1;
		head = bh;

		bh->b_size = size;

		/* Link the buffer to its page */
		set_bh_page(bh, page, offset);
	}
	return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
	if (head) {
		do {
			bh = head;
			head = head->b_this_page;
			free_buffer_head(bh);
		} while (head);
	}

	/*
	 * Return failure for non-async IO requests.  Async IO requests
	 * are not allowed to fail, so we have to wait until buffer heads
	 * become available.  But we don't want tasks sleeping with
	 * partially complete buffers, so all were released above.
	 */
	if (!retry)
		return NULL;

	/* We're _really_ low on memory. Now we just
	 * wait for old buffer heads to become free due to
	 * finishing IO.  Since this is an async request and
	 * the reserve list is empty, we're sure there are
	 * async buffer heads in use.
	 */
	free_more_memory();
	goto try_again;
}
EXPORT_SYMBOL_GPL(alloc_page_buffers);
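
/*
 * For example, create_empty_buffers() below calls
 * alloc_page_buffers(page, blocksize, 1) so that the allocation retries
 * (via free_more_memory()) rather than return NULL, while grow_dev_page()
 * passes retry == 0 and handles the NULL case itself.
 */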

static inline void
link_dev_buffers(struct page *page, struct buffer_head *head)
{
	struct buffer_head *bh, *tail;

	bh = head;
	do {
		tail = bh;
		bh = bh->b_this_page;
	} while (bh);
	tail->b_this_page = head;
	attach_page_buffers(page, head);
}

static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
{
	sector_t retval = ~((sector_t)0);
	loff_t sz = i_size_read(bdev->bd_inode);

	if (sz) {
		unsigned int sizebits = blksize_bits(size);
		retval = (sz >> sizebits);
	}
	return retval;
}

/*
 * Initialise the state of a blockdev page's buffers.
 */
static sector_t
init_page_buffers(struct page *page, struct block_device *bdev,
			sector_t block, int size)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;
	int uptodate = PageUptodate(page);
	sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size);

	do {
		if (!buffer_mapped(bh)) {
			init_buffer(bh, NULL, NULL);
			bh->b_bdev = bdev;
			bh->b_blocknr = block;
			if (uptodate)
				set_buffer_uptodate(bh);
			if (block < end_block)
				set_buffer_mapped(bh);
		}
		block++;
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * Caller needs to validate requested block against end of device.
	 */
	return end_block;
}

/*
 * Create the page-cache page that contains the requested block.
 *
 * This is used purely for blockdev mappings.
 */
static int
grow_dev_page(struct block_device *bdev, sector_t block,
	      pgoff_t index, int size, int sizebits, gfp_t gfp)
{
	struct inode *inode = bdev->bd_inode;
	struct page *page;
	struct buffer_head *bh;
	sector_t end_block;
	int ret = 0;		/* Will call free_more_memory() */
	gfp_t gfp_mask;

	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;

	/*
	 * XXX: __getblk_slow() can not really deal with failure and
	 * will endlessly loop on improvised global reclaim.  Prefer
	 * looping in the allocator rather than here, at least that
	 * code knows what it's doing.
	 */
	gfp_mask |= __GFP_NOFAIL;

	page = find_or_create_page(inode->i_mapping, index, gfp_mask);
	if (!page)
		return ret;

	BUG_ON(!PageLocked(page));

	if (page_has_buffers(page)) {
		bh = page_buffers(page);
		if (bh->b_size == size) {
			end_block = init_page_buffers(page, bdev,
						(sector_t)index << sizebits,
						size);
			goto done;
		}
		if (!try_to_free_buffers(page))
			goto failed;
	}

	/*
	 * Allocate some buffers for this page
	 */
	bh = alloc_page_buffers(page, size, 0);
	if (!bh)
		goto failed;

	/*
	 * Link the page to the buffers and initialise them.  Take the
	 * lock to be atomic wrt __find_get_block(), which does not
	 * run under the page lock.
	 */
	spin_lock(&inode->i_mapping->private_lock);
	link_dev_buffers(page, bh);
	end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
			size);
	spin_unlock(&inode->i_mapping->private_lock);
done:
	ret = (block < end_block) ? 1 : -ENXIO;
failed:
	unlock_page(page);
	page_cache_release(page);
	return ret;
}

/*
 * Create buffers for the specified block device block's page.  If
 * that page was dirty, the buffers are set dirty also.
 */
static int
grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
{
	pgoff_t index;
	int sizebits;

	sizebits = -1;
	do {
		sizebits++;
	} while ((size << sizebits) < PAGE_SIZE);
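
	/*
	 * Worked example: with 512-byte blocks and a 4096-byte PAGE_SIZE,
	 * the loop above leaves sizebits == 3, so block 40 lands in
	 * page index 40 >> 3 == 5.
	 */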

	index = block >> sizebits;

	/*
	 * Check for a block which wants to lie outside our maximum possible
	 * pagecache index.  (this comparison is done using sector_t types).
	 */
	if (unlikely(index != block >> sizebits)) {
		char b[BDEVNAME_SIZE];

		printk(KERN_ERR "%s: requested out-of-range block %llu for "
			"device %s\n",
			__func__, (unsigned long long)block,
			bdevname(bdev, b));
		return -EIO;
	}

	/* Create a page with the proper size buffers. */
	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
}

struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	/* Size must be multiple of hard sectorsize */
	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
			(size < 512 || size > PAGE_SIZE))) {
		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
					size);
		printk(KERN_ERR "logical block size: %d\n",
					bdev_logical_block_size(bdev));

		dump_stack();
		return NULL;
	}

	for (;;) {
		struct buffer_head *bh;
		int ret;

		bh = __find_get_block(bdev, block, size);
		if (bh)
			return bh;

		ret = grow_buffers(bdev, block, size, gfp);
		if (ret < 0)
			return NULL;
		if (ret == 0)
			free_more_memory();
	}
}
EXPORT_SYMBOL(__getblk_slow);

/*
 * The relationship between dirty buffers and dirty pages:
 *
 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
 * the page is tagged dirty in its radix tree.
 *
 * At all times, the dirtiness of the buffers represents the dirtiness of
 * subsections of the page.  If the page has buffers, the page dirty bit is
 * merely a hint about the true dirty state.
 *
 * When a page is set dirty in its entirety, all its buffers are marked dirty
 * (if the page has buffers).
 *
 * When a buffer is marked dirty, its page is dirtied, but the page's other
 * buffers are not.
 *
 * Also.  When blockdev buffers are explicitly read with bread(), they
 * individually become uptodate.  But their backing page remains not
 * uptodate - even if all of its buffers are uptodate.  A subsequent
 * block_read_full_page() against that page will discover all the uptodate
 * buffers, will set the page uptodate and will perform no I/O.
 */

/**
 * mark_buffer_dirty - mark a buffer_head as needing writeout
 * @bh: the buffer_head to mark dirty
 *
 * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
 * backing page dirty, then tag the page as dirty in its address_space's radix
 * tree and then attach the address_space's inode to its superblock's dirty
 * inode list.
 *
 * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
 * mapping->tree_lock and mapping->host->i_lock.
 */
void mark_buffer_dirty(struct buffer_head *bh)
{
	WARN_ON_ONCE(!buffer_uptodate(bh));

	trace_block_dirty_buffer(bh);

	/*
	 * Very *carefully* optimize the it-is-already-dirty case.
	 *
	 * Don't let the final "is it dirty" escape to before we
	 * perhaps modified the buffer.
	 */
	if (buffer_dirty(bh)) {
		smp_mb();
		if (buffer_dirty(bh))
			return;
	}

	if (!test_set_buffer_dirty(bh)) {
		struct page *page = bh->b_page;
		struct address_space *mapping = NULL;
		struct mem_cgroup *memcg;

		memcg = mem_cgroup_begin_page_stat(page);
		if (!TestSetPageDirty(page)) {
			mapping = page_mapping(page);
			if (mapping)
				__set_page_dirty(page, mapping, memcg, 0);
		}
		mem_cgroup_end_page_stat(memcg);
		if (mapping)
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
}
EXPORT_SYMBOL(mark_buffer_dirty);
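
/*
 * For example, the usual pattern after modifying a metadata buffer read
 * with __bread() is (sketch; "data" is assumed from the caller's context):
 *
 *	memcpy(bh->b_data, data, bh->b_size);
 *	mark_buffer_dirty(bh);
 *	brelse(bh);
 */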

/*
 * Decrement a buffer_head's reference count.  If all buffers against a page
 * have zero reference count, are clean and unlocked, and if the page is clean
 * and unlocked then try_to_free_buffers() may strip the buffers from the page
 * in preparation for freeing it (sometimes, rarely, buffers are removed from
 * a page but it ends up not being freed, and buffers may later be reattached).
 */
void __brelse(struct buffer_head * buf)
{
	if (atomic_read(&buf->b_count)) {
		put_bh(buf);
		return;
	}
	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
EXPORT_SYMBOL(__brelse);

/*
 * bforget() is like brelse(), except it discards any
 * potentially dirty data.
 */
void __bforget(struct buffer_head *bh)
{
	clear_buffer_dirty(bh);
	if (bh->b_assoc_map) {
		struct address_space *buffer_mapping = bh->b_page->mapping;

		spin_lock(&buffer_mapping->private_lock);
		list_del_init(&bh->b_assoc_buffers);
		bh->b_assoc_map = NULL;
		spin_unlock(&buffer_mapping->private_lock);
	}
	__brelse(bh);
}
EXPORT_SYMBOL(__bforget);

static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return bh;
	} else {
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh);
		wait_on_buffer(bh);
		if (buffer_uptodate(bh))
			return bh;
	}
	brelse(bh);
	return NULL;
}

/*
 * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
 * refcount elevated by one when they're in an LRU.  A buffer can only appear
 * once in a particular CPU's LRU.  A single buffer can be present in multiple
 * CPUs' LRUs at the same time.
 *
 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
 * sb_find_get_block().
 *
 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
 * a local interrupt disable for that.
 */

#define BH_LRU_SIZE	16

struct bh_lru {
	struct buffer_head *bhs[BH_LRU_SIZE];
};

static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};

#ifdef CONFIG_SMP
#define bh_lru_lock()	local_irq_disable()
#define bh_lru_unlock()	local_irq_enable()
#else
#define bh_lru_lock()	preempt_disable()
#define bh_lru_unlock()	preempt_enable()
#endif

static inline void check_irqs_on(void)
{
#ifdef irqs_disabled
	BUG_ON(irqs_disabled());
#endif
}

/*
 * The LRU management algorithm is dopey-but-simple.  Sorry.
 */
static void bh_lru_install(struct buffer_head *bh)
{
	struct buffer_head *evictee = NULL;

	check_irqs_on();
	bh_lru_lock();
	if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
		struct buffer_head *bhs[BH_LRU_SIZE];
		int in;
		int out = 0;

		get_bh(bh);
		bhs[out++] = bh;
		for (in = 0; in < BH_LRU_SIZE; in++) {
			struct buffer_head *bh2 =
				__this_cpu_read(bh_lrus.bhs[in]);

			if (bh2 == bh) {
				__brelse(bh2);
			} else {
				if (out >= BH_LRU_SIZE) {
					BUG_ON(evictee != NULL);
					evictee = bh2;
				} else {
					bhs[out++] = bh2;
				}
			}
		}
		while (out < BH_LRU_SIZE)
			bhs[out++] = NULL;
		memcpy(this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
	}
	bh_lru_unlock();

	if (evictee)
		__brelse(evictee);
}

/*
 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
 */
static struct buffer_head *
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *ret = NULL;
	unsigned int i;

	check_irqs_on();
	bh_lru_lock();
	for (i = 0; i < BH_LRU_SIZE; i++) {
		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);

		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
		    bh->b_size == size) {
			if (i) {
				while (i) {
					__this_cpu_write(bh_lrus.bhs[i],
						__this_cpu_read(bh_lrus.bhs[i - 1]));
					i--;
				}
				__this_cpu_write(bh_lrus.bhs[0], bh);
			}
			get_bh(bh);
			ret = bh;
			break;
		}
	}
	bh_lru_unlock();
	return ret;
}

/*
 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
 * it in the LRU and mark it as accessed.  If it is not present then return
 * NULL.
 */
struct buffer_head *
__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);

	if (bh == NULL) {
		/* __find_get_block_slow will mark the page accessed */
		bh = __find_get_block_slow(bdev, block);
		if (bh)
			bh_lru_install(bh);
	} else
		touch_buffer(bh);

	return bh;
}
EXPORT_SYMBOL(__find_get_block);

/*
 * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
 * which corresponds to the passed block_device, block and size. The
 * returned buffer has its reference count incremented.
 *
 * __getblk_gfp() will lock up the machine if grow_dev_page's
 * try_to_free_buffers() attempt is failing.  FIXME, perhaps?
 */
struct buffer_head *
__getblk_gfp(struct block_device *bdev, sector_t block,
	     unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __find_get_block(bdev, block, size);

	might_sleep();
	if (bh == NULL)
		bh = __getblk_slow(bdev, block, size, gfp);
	return bh;
}
EXPORT_SYMBOL(__getblk_gfp);

/*
 * Do async read-ahead on a buffer.
 */
void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
	struct buffer_head *bh = __getblk(bdev, block, size);
	if (likely(bh)) {
		ll_rw_block(READA, 1, &bh);
		brelse(bh);
	}
}
EXPORT_SYMBOL(__breadahead);
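
/*
 * Read-ahead is typically issued just before the blocking read of a
 * nearby block (illustrative sketch):
 *
 *	__breadahead(bdev, block + 1, size);
 *	bh = __bread(bdev, block, size);
 */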

/**
 *  __bread_gfp() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *  @gfp: page allocation flag
 *
 *  Reads a specified block, and returns the buffer head that contains it.
 *  If @gfp is zero, the page cache is allocated from the non-movable area
 *  so that the cached page does not prevent page migration.
 *  It returns NULL if the block was unreadable.
 */
struct buffer_head *
__bread_gfp(struct block_device *bdev, sector_t block,
		   unsigned size, gfp_t gfp)
{
	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);

	if (likely(bh) && !buffer_uptodate(bh))
		bh = __bread_slow(bh);
	return bh;
}
EXPORT_SYMBOL(__bread_gfp);
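
/*
 * Typical read of a metadata block (sketch, with error handling reduced
 * to the minimum):
 *
 *	struct buffer_head *bh = __bread(bdev, block, size);
 *
 *	if (!bh)
 *		return -EIO;
 *	...examine bh->b_data...
 *	brelse(bh);
 */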

/*
 * invalidate_bh_lrus() is called rarely - but not only at unmount.
 * This doesn't race because it runs in each cpu either in irq
 * or with preempt disabled.
 */
static void invalidate_bh_lru(void *arg)
{
	struct bh_lru *b = &get_cpu_var(bh_lrus);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		brelse(b->bhs[i]);
		b->bhs[i] = NULL;
	}
	put_cpu_var(bh_lrus);
}

static bool has_bh_in_lru(int cpu, void *dummy)
{
	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
	int i;

	for (i = 0; i < BH_LRU_SIZE; i++) {
		if (b->bhs[i])
			return true;
	}

	return false;
}

void invalidate_bh_lrus(void)
{
	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset)
{
	bh->b_page = page;
	BUG_ON(offset >= PAGE_SIZE);
	if (PageHighMem(page))
		/*
		 * This catches illegal uses and preserves the offset:
		 */
		bh->b_data = (char *)(0 + offset);
	else
		bh->b_data = page_address(page) + offset;
}
EXPORT_SYMBOL(set_bh_page);

/*
 * Called when truncating a buffer on a page completely.
 */

/* Bits that are cleared during an invalidate */
#define BUFFER_FLAGS_DISCARD \
	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
	 1 << BH_Delay | 1 << BH_Unwritten)

static void discard_buffer(struct buffer_head * bh)
{
	unsigned long b_state, b_state_old;

	lock_buffer(bh);
	clear_buffer_dirty(bh);
	bh->b_bdev = NULL;
	b_state = bh->b_state;
	for (;;) {
		b_state_old = cmpxchg(&bh->b_state, b_state,
				      (b_state & ~BUFFER_FLAGS_DISCARD));
		if (b_state_old == b_state)
			break;
		b_state = b_state_old;
	}
	unlock_buffer(bh);
}

1517 /**
1518  * block_invalidatepage - invalidate part or all of a buffer-backed page
1519  *
1520  * @page: the page which is affected
1521  * @offset: start of the range to invalidate
1522  * @length: length of the range to invalidate
1523  *
1524  * block_invalidatepage() is called when all or part of the page has become
1525  * invalidated by a truncate operation.
1526  *
1527  * block_invalidatepage() does not have to release all buffers, but it must
1528  * ensure that no dirty buffer is left outside @offset and that no I/O
1529  * is underway against any of the blocks which are outside the truncation
1530  * point, because the caller is about to free (and possibly reuse) those
1531  * blocks on-disk.
1532  */
1533 void block_invalidatepage(struct page *page, unsigned int offset,
1534                           unsigned int length)
1535 {
1536         struct buffer_head *head, *bh, *next;
1537         unsigned int curr_off = 0;
1538         unsigned int stop = length + offset;
1539
1540         BUG_ON(!PageLocked(page));
1541         if (!page_has_buffers(page))
1542                 goto out;
1543
1544         /*
1545          * Check for overflow
1546          */
1547         BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
1548
1549         head = page_buffers(page);
1550         bh = head;
1551         do {
1552                 unsigned int next_off = curr_off + bh->b_size;
1553                 next = bh->b_this_page;
1554
1555                 /*
1556                  * Are we still fully in range ?
1557                  */
1558                 if (next_off > stop)
1559                         goto out;
1560
1561                 /*
1562                  * is this block fully invalidated?
1563                  */
1564                 if (offset <= curr_off)
1565                         discard_buffer(bh);
1566                 curr_off = next_off;
1567                 bh = next;
1568         } while (bh != head);
1569
1570         /*
1571          * We release buffers only if the entire page is being invalidated.
1572          * The get_block cached value has been unconditionally invalidated,
1573          * so real IO is not possible anymore.
1574          */
1575         if (offset == 0)
1576                 try_to_release_page(page, 0);
1577 out:
1578         return;
1579 }
1580 EXPORT_SYMBOL(block_invalidatepage);
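/*
 * Example (hedged sketch): a filesystem with no private per-page state can
 * wire ->invalidatepage straight to block_invalidatepage(); one with extra
 * bookkeeping wraps it, roughly like this hypothetical handler:
 */
static void example_invalidatepage(struct page *page, unsigned int offset,
                                   unsigned int length)
{
        /* ... tear down filesystem-private per-page state first ... */
        block_invalidatepage(page, offset, length);
}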
1581
1582
1583 /*
1584  * We attach and possibly dirty the buffers atomically wrt
1585  * __set_page_dirty_buffers() via private_lock.  try_to_free_buffers
1586  * is already excluded via the page lock.
1587  */
1588 void create_empty_buffers(struct page *page,
1589                         unsigned long blocksize, unsigned long b_state)
1590 {
1591         struct buffer_head *bh, *head, *tail;
1592
1593         head = alloc_page_buffers(page, blocksize, 1);
1594         bh = head;
1595         do {
1596                 bh->b_state |= b_state;
1597                 tail = bh;
1598                 bh = bh->b_this_page;
1599         } while (bh);
1600         tail->b_this_page = head;
1601
1602         spin_lock(&page->mapping->private_lock);
1603         if (PageUptodate(page) || PageDirty(page)) {
1604                 bh = head;
1605                 do {
1606                         if (PageDirty(page))
1607                                 set_buffer_dirty(bh);
1608                         if (PageUptodate(page))
1609                                 set_buffer_uptodate(bh);
1610                         bh = bh->b_this_page;
1611                 } while (bh != head);
1612         }
1613         attach_page_buffers(page, head);
1614         spin_unlock(&page->mapping->private_lock);
1615 }
1616 EXPORT_SYMBOL(create_empty_buffers);
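/*
 * Example (hedged sketch): the usual calling pattern - attach a fresh set
 * of buffers to a locked page that has none yet, then walk them through
 * page_buffers().  create_page_buffers() below does exactly this.
 */
static struct buffer_head *example_page_buffers(struct page *page,
                                                struct inode *inode)
{
        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
                create_empty_buffers(page, 1 << inode->i_blkbits, 0);
        return page_buffers(page);
}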
1617
1618 /*
1619  * We are taking a block for data and we don't want any output from any
1620  * buffer-cache aliases from the moment this function returns until the
1621  * moment something explicitly marks the buffer dirty (hopefully that
1622  * will not happen until we free that block ;-).
1623  * We don't even need to mark it not-uptodate - nobody can expect
1624  * anything from a newly allocated buffer anyway. We used to use
1625  * unmap_buffer() for such invalidation, but that was wrong. We definitely
1626  * don't want to mark the alias unmapped, for example - it would confuse
1627  * anyone who might pick it up with bread() afterwards...
1628  *
1629  * Also..  Note that bforget() doesn't lock the buffer.  So there can
1630  * be writeout I/O going on against recently-freed buffers.  We don't
1631  * wait on that I/O in bforget() - it's more efficient to wait on the I/O
1632  * only if we really need to.  That happens here.
1633  */
1634 void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
1635 {
1636         struct buffer_head *old_bh;
1637
1638         might_sleep();
1639
1640         old_bh = __find_get_block_slow(bdev, block);
1641         if (old_bh) {
1642                 clear_buffer_dirty(old_bh);
1643                 wait_on_buffer(old_bh);
1644                 clear_buffer_req(old_bh);
1645                 __brelse(old_bh);
1646         }
1647 }
1648 EXPORT_SYMBOL(unmap_underlying_metadata);
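/*
 * Example (hedged sketch of the canonical caller pattern, as seen in
 * __block_write_full_page() and __block_write_begin() below): once
 * get_block() hands back a freshly allocated block, any stale blockdev
 * alias for it must be shot down before new data is written through the
 * new mapping.
 */
static int example_map_new_block(struct inode *inode, sector_t block,
                                 struct buffer_head *bh,
                                 get_block_t *get_block)
{
        int err = get_block(inode, block, bh, 1);

        if (!err && buffer_new(bh)) {
                clear_buffer_new(bh);
                unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
        }
        return err;
}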
1649
1650 /*
1651  * Size is a power-of-two in the range 512..PAGE_SIZE,
1652  * and the case we care about most is PAGE_SIZE.
1653  *
1654  * So this *could* possibly be written with those
1655  * constraints in mind (relevant mostly if some
1656  * architecture has a slow bit-scan instruction).
1657  */
1658 static inline int block_size_bits(unsigned int blocksize)
1659 {
1660         return ilog2(blocksize);
1661 }
1662
1663 static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
1664 {
1665         BUG_ON(!PageLocked(page));
1666
1667         if (!page_has_buffers(page))
1668                 create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state);
1669         return page_buffers(page);
1670 }
1671
1672 /*
1673  * NOTE! All mapped/uptodate combinations are valid:
1674  *
1675  *      Mapped  Uptodate        Meaning
1676  *
1677  *      No      No              "unknown" - must do get_block()
1678  *      No      Yes             "hole" - zero-filled
1679  *      Yes     No              "allocated" - allocated on disk, not read in
1680  *      Yes     Yes             "valid" - allocated and up-to-date in memory.
1681  *
1682  * "Dirty" is valid only with the last case (mapped+uptodate).
1683  */
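/*
 * A hedged illustration of the table above (not part of the kernel API):
 * classify a buffer by its Mapped/Uptodate bits; the four branches mirror
 * the four rows.
 */
static const char *example_bh_state_name(struct buffer_head *bh)
{
        if (!buffer_mapped(bh))
                return buffer_uptodate(bh) ? "hole" : "unknown";
        return buffer_uptodate(bh) ? "valid" : "allocated";
}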
1684
1685 /*
1686  * While block_write_full_page is writing back the dirty buffers under
1687  * the page lock, whoever dirtied the buffers may decide to clean them
1688  * again at any time.  We handle that by only looking at the buffer
1689  * state inside lock_buffer().
1690  *
1691  * If block_write_full_page() is called for regular writeback
1692  * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1693  * locked buffer.  This can only happen if someone has written the buffer
1694  * directly, with submit_bh().  At the address_space level PageWriteback
1695  * prevents this contention from occurring.
1696  *
1697  * If block_write_full_page() is called with wbc->sync_mode ==
1698  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
1699  * causes the writes to be flagged as synchronous writes.
1700  */
1701 static int __block_write_full_page(struct inode *inode, struct page *page,
1702                         get_block_t *get_block, struct writeback_control *wbc,
1703                         bh_end_io_t *handler)
1704 {
1705         int err;
1706         sector_t block;
1707         sector_t last_block;
1708         struct buffer_head *bh, *head;
1709         unsigned int blocksize, bbits;
1710         int nr_underway = 0;
1711         int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
1712
1713         head = create_page_buffers(page, inode,
1714                                         (1 << BH_Dirty)|(1 << BH_Uptodate));
1715
1716         /*
1717          * Be very careful.  We have no exclusion from __set_page_dirty_buffers
1718          * here, and the (potentially unmapped) buffers may become dirty at
1719          * any time.  If a buffer becomes dirty here after we've inspected it
1720          * then we just miss that fact, and the page stays dirty.
1721          *
1722          * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1723          * handle that here by just cleaning them.
1724          */
1725
1726         bh = head;
1727         blocksize = bh->b_size;
1728         bbits = block_size_bits(blocksize);
1729
1730         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1731         last_block = (i_size_read(inode) - 1) >> bbits;
1732
1733         /*
1734          * Get all the dirty buffers mapped to disk addresses and
1735          * handle any aliases from the underlying blockdev's mapping.
1736          */
1737         do {
1738                 if (block > last_block) {
1739                         /*
1740                          * mapped buffers outside i_size will occur, because
1741                          * this page can be outside i_size when there is a
1742                          * truncate in progress.
1743                          */
1744                         /*
1745                          * The buffer was zeroed by block_write_full_page()
1746                          */
1747                         clear_buffer_dirty(bh);
1748                         set_buffer_uptodate(bh);
1749                 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1750                            buffer_dirty(bh)) {
1751                         WARN_ON(bh->b_size != blocksize);
1752                         err = get_block(inode, block, bh, 1);
1753                         if (err)
1754                                 goto recover;
1755                         clear_buffer_delay(bh);
1756                         if (buffer_new(bh)) {
1757                                 /* blockdev mappings never come here */
1758                                 clear_buffer_new(bh);
1759                                 unmap_underlying_metadata(bh->b_bdev,
1760                                                         bh->b_blocknr);
1761                         }
1762                 }
1763                 bh = bh->b_this_page;
1764                 block++;
1765         } while (bh != head);
1766
1767         do {
1768                 if (!buffer_mapped(bh))
1769                         continue;
1770                 /*
1771                  * If it's a fully non-blocking write attempt and we cannot
1772                  * lock the buffer then redirty the page.  Note that this can
1773                  * potentially cause a busy-wait loop from writeback threads
1774                  * and kswapd activity, but those code paths have their own
1775                  * higher-level throttling.
1776                  */
1777                 if (wbc->sync_mode != WB_SYNC_NONE) {
1778                         lock_buffer(bh);
1779                 } else if (!trylock_buffer(bh)) {
1780                         redirty_page_for_writepage(wbc, page);
1781                         continue;
1782                 }
1783                 if (test_clear_buffer_dirty(bh)) {
1784                         mark_buffer_async_write_endio(bh, handler);
1785                 } else {
1786                         unlock_buffer(bh);
1787                 }
1788         } while ((bh = bh->b_this_page) != head);
1789
1790         /*
1791          * The page and its buffers are protected by PageWriteback(), so we can
1792          * drop the bh refcounts early.
1793          */
1794         BUG_ON(PageWriteback(page));
1795         set_page_writeback(page);
1796
1797         do {
1798                 struct buffer_head *next = bh->b_this_page;
1799                 if (buffer_async_write(bh)) {
1800                         submit_bh_wbc(write_op, bh, 0, wbc);
1801                         nr_underway++;
1802                 }
1803                 bh = next;
1804         } while (bh != head);
1805         unlock_page(page);
1806
1807         err = 0;
1808 done:
1809         if (nr_underway == 0) {
1810                 /*
1811                  * The page was marked dirty, but the buffers were
1812                  * clean.  Someone wrote them back by hand with
1813                  * ll_rw_block/submit_bh.  A rare case.
1814                  */
1815                 end_page_writeback(page);
1816
1817                 /*
1818                  * The page and buffer_heads can be released at any time from
1819                  * here on.
1820                  */
1821         }
1822         return err;
1823
1824 recover:
1825         /*
1826          * ENOSPC, or some other error.  We may already have added some
1827          * blocks to the file, so we need to write these out to avoid
1828          * exposing stale data.
1829          * The page is currently locked and not marked for writeback
1830          */
1831         bh = head;
1832         /* Recovery: lock and submit the mapped buffers */
1833         do {
1834                 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1835                     !buffer_delay(bh)) {
1836                         lock_buffer(bh);
1837                         mark_buffer_async_write_endio(bh, handler);
1838                 } else {
1839                         /*
1840                          * The buffer may have been set dirty during
1841                          * attachment to a dirty page.
1842                          */
1843                         clear_buffer_dirty(bh);
1844                 }
1845         } while ((bh = bh->b_this_page) != head);
1846         SetPageError(page);
1847         BUG_ON(PageWriteback(page));
1848         mapping_set_error(page->mapping, err);
1849         set_page_writeback(page);
1850         do {
1851                 struct buffer_head *next = bh->b_this_page;
1852                 if (buffer_async_write(bh)) {
1853                         clear_buffer_dirty(bh);
1854                         submit_bh_wbc(write_op, bh, 0, wbc);
1855                         nr_underway++;
1856                 }
1857                 bh = next;
1858         } while (bh != head);
1859         unlock_page(page);
1860         goto done;
1861 }
1862
1863 /*
1864  * If a page has any new buffers, zero them out here, and mark them uptodate
1865  * and dirty so they'll be written out (in order to prevent uninitialised
1866  * block data from leaking). And clear the new bit.
1867  */
1868 void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
1869 {
1870         unsigned int block_start, block_end;
1871         struct buffer_head *head, *bh;
1872
1873         BUG_ON(!PageLocked(page));
1874         if (!page_has_buffers(page))
1875                 return;
1876
1877         bh = head = page_buffers(page);
1878         block_start = 0;
1879         do {
1880                 block_end = block_start + bh->b_size;
1881
1882                 if (buffer_new(bh)) {
1883                         if (block_end > from && block_start < to) {
1884                                 if (!PageUptodate(page)) {
1885                                         unsigned start, size;
1886
1887                                         start = max(from, block_start);
1888                                         size = min(to, block_end) - start;
1889
1890                                         zero_user(page, start, size);
1891                                         set_buffer_uptodate(bh);
1892                                 }
1893
1894                                 clear_buffer_new(bh);
1895                                 mark_buffer_dirty(bh);
1896                         }
1897                 }
1898
1899                 block_start = block_end;
1900                 bh = bh->b_this_page;
1901         } while (bh != head);
1902 }
1903 EXPORT_SYMBOL(page_zero_new_buffers);
1904
1905 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1906                 get_block_t *get_block)
1907 {
1908         unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1909         unsigned to = from + len;
1910         struct inode *inode = page->mapping->host;
1911         unsigned block_start, block_end;
1912         sector_t block;
1913         int err = 0;
1914         unsigned blocksize, bbits;
1915         struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
1916
1917         BUG_ON(!PageLocked(page));
1918         BUG_ON(from > PAGE_CACHE_SIZE);
1919         BUG_ON(to > PAGE_CACHE_SIZE);
1920         BUG_ON(from > to);
1921
1922         head = create_page_buffers(page, inode, 0);
1923         blocksize = head->b_size;
1924         bbits = block_size_bits(blocksize);
1925
1926         block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
1927
1928         for (bh = head, block_start = 0; bh != head || !block_start;
1929             block++, block_start = block_end, bh = bh->b_this_page) {
1930                 block_end = block_start + blocksize;
1931                 if (block_end <= from || block_start >= to) {
1932                         if (PageUptodate(page)) {
1933                                 if (!buffer_uptodate(bh))
1934                                         set_buffer_uptodate(bh);
1935                         }
1936                         continue;
1937                 }
1938                 if (buffer_new(bh))
1939                         clear_buffer_new(bh);
1940                 if (!buffer_mapped(bh)) {
1941                         WARN_ON(bh->b_size != blocksize);
1942                         err = get_block(inode, block, bh, 1);
1943                         if (err)
1944                                 break;
1945                         if (buffer_new(bh)) {
1946                                 unmap_underlying_metadata(bh->b_bdev,
1947                                                         bh->b_blocknr);
1948                                 if (PageUptodate(page)) {
1949                                         clear_buffer_new(bh);
1950                                         set_buffer_uptodate(bh);
1951                                         mark_buffer_dirty(bh);
1952                                         continue;
1953                                 }
1954                                 if (block_end > to || block_start < from)
1955                                         zero_user_segments(page,
1956                                                 to, block_end,
1957                                                 block_start, from);
1958                                 continue;
1959                         }
1960                 }
1961                 if (PageUptodate(page)) {
1962                         if (!buffer_uptodate(bh))
1963                                 set_buffer_uptodate(bh);
1964                         continue; 
1965                 }
1966                 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1967                     !buffer_unwritten(bh) &&
1968                      (block_start < from || block_end > to)) {
1969                         ll_rw_block(READ, 1, &bh);
1970                         *wait_bh++ = bh;
1971                 }
1972         }
1973         /*
1974          * If we issued read requests - let them complete.
1975          */
1976         while (wait_bh > wait) {
1977                 wait_on_buffer(*--wait_bh);
1978                 if (!buffer_uptodate(*wait_bh))
1979                         err = -EIO;
1980         }
1981         if (unlikely(err))
1982                 page_zero_new_buffers(page, from, to);
1983         return err;
1984 }
1985 EXPORT_SYMBOL(__block_write_begin);
1986
1987 static int __block_commit_write(struct inode *inode, struct page *page,
1988                 unsigned from, unsigned to)
1989 {
1990         unsigned block_start, block_end;
1991         int partial = 0;
1992         unsigned blocksize;
1993         struct buffer_head *bh, *head;
1994
1995         bh = head = page_buffers(page);
1996         blocksize = bh->b_size;
1997
1998         block_start = 0;
1999         do {
2000                 block_end = block_start + blocksize;
2001                 if (block_end <= from || block_start >= to) {
2002                         if (!buffer_uptodate(bh))
2003                                 partial = 1;
2004                 } else {
2005                         set_buffer_uptodate(bh);
2006                         mark_buffer_dirty(bh);
2007                 }
2008                 clear_buffer_new(bh);
2009
2010                 block_start = block_end;
2011                 bh = bh->b_this_page;
2012         } while (bh != head);
2013
2014         /*
2015          * If this is a partial write which happened to make all buffers
2016          * uptodate then we can optimize away a bogus readpage() for
2017          * the next read(). Here we 'discover' whether the page went
2018          * uptodate as a result of this (potentially partial) write.
2019          */
2020         if (!partial)
2021                 SetPageUptodate(page);
2022         return 0;
2023 }
2024
2025 /*
2026  * block_write_begin takes care of the basic task of block allocation and
2027  * bringing partial write blocks uptodate first.
2028  *
2029  * The filesystem needs to handle block truncation upon failure.
2030  */
2031 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2032                 unsigned flags, struct page **pagep, get_block_t *get_block)
2033 {
2034         pgoff_t index = pos >> PAGE_CACHE_SHIFT;
2035         struct page *page;
2036         int status;
2037
2038         page = grab_cache_page_write_begin(mapping, index, flags);
2039         if (!page)
2040                 return -ENOMEM;
2041
2042         status = __block_write_begin(page, pos, len, get_block);
2043         if (unlikely(status)) {
2044                 unlock_page(page);
2045                 page_cache_release(page);
2046                 page = NULL;
2047         }
2048
2049         *pagep = page;
2050         return status;
2051 }
2052 EXPORT_SYMBOL(block_write_begin);
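/*
 * Example (a hedged sketch modelled on simple block filesystems): a
 * ->write_begin implementation delegates to block_write_begin() with the
 * filesystem's get_block routine, then handles the block truncation that
 * the comment above requires on failure.  "example_get_block" and the
 * block-freeing step are hypothetical, assumed to be defined elsewhere.
 */
static int example_write_begin(struct file *file,
                               struct address_space *mapping,
                               loff_t pos, unsigned len, unsigned flags,
                               struct page **pagep, void **fsdata)
{
        int ret;

        ret = block_write_begin(mapping, pos, len, flags, pagep,
                                example_get_block);
        if (ret < 0) {
                struct inode *inode = mapping->host;

                /* drop any pagecache instantiated beyond i_size ... */
                if (pos + len > inode->i_size)
                        truncate_pagecache(inode, inode->i_size);
                /* ... and free the blocks allocated past it (fs-specific) */
        }
        return ret;
}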
2053
2054 int block_write_end(struct file *file, struct address_space *mapping,
2055                         loff_t pos, unsigned len, unsigned copied,
2056                         struct page *page, void *fsdata)
2057 {
2058         struct inode *inode = mapping->host;
2059         unsigned start;
2060
2061         start = pos & (PAGE_CACHE_SIZE - 1);
2062
2063         if (unlikely(copied < len)) {
2064                 /*
2065                  * The buffers that were written will now be uptodate, so we
2066                  * don't have to worry about a readpage reading them and
2067                  * overwriting a partial write. However if we have encountered
2068                  * a short write and only partially written into a buffer, it
2069                  * will not be marked uptodate, so a readpage might come in and
2070                  * destroy our partial write.
2071                  *
2072                  * Do the simplest thing, and just treat any short write to a
2073                  * non uptodate page as a zero-length write, and force the
2074                  * caller to redo the whole thing.
2075                  */
2076                 if (!PageUptodate(page))
2077                         copied = 0;
2078
2079                 page_zero_new_buffers(page, start+copied, start+len);
2080         }
2081         flush_dcache_page(page);
2082
2083         /* This could be a short (even 0-length) commit */
2084         __block_commit_write(inode, page, start, start+copied);
2085
2086         return copied;
2087 }
2088 EXPORT_SYMBOL(block_write_end);
2089
2090 int generic_write_end(struct file *file, struct address_space *mapping,
2091                         loff_t pos, unsigned len, unsigned copied,
2092                         struct page *page, void *fsdata)
2093 {
2094         struct inode *inode = mapping->host;
2095         loff_t old_size = inode->i_size;
2096         int i_size_changed = 0;
2097
2098         copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2099
2100         /*
2101          * No need to use i_size_read() here: the i_size
2102          * cannot change under us because we hold i_mutex.
2103          *
2104          * But it's important to update i_size while still holding page lock:
2105          * page writeout could otherwise come in and zero beyond i_size.
2106          */
2107         if (pos+copied > inode->i_size) {
2108                 i_size_write(inode, pos+copied);
2109                 i_size_changed = 1;
2110         }
2111
2112         unlock_page(page);
2113         page_cache_release(page);
2114
2115         if (old_size < pos)
2116                 pagecache_isize_extended(inode, old_size, pos);
2117         /*
2118          * Don't mark the inode dirty under page lock. First, it unnecessarily
2119  * lengthens the hold time of the page lock. Second, it forces lock
2120          * ordering of page lock and transaction start for journaling
2121          * filesystems.
2122          */
2123         if (i_size_changed)
2124                 mark_inode_dirty(inode);
2125
2126         return copied;
2127 }
2128 EXPORT_SYMBOL(generic_write_end);
2129
2130 /*
2131  * block_is_partially_uptodate checks whether buffers within a page are
2132  * uptodate or not.
2133  *
2134  * Returns true if all buffers which correspond to a file portion
2135  * we want to read are uptodate.
2136  */
2137 int block_is_partially_uptodate(struct page *page, unsigned long from,
2138                                         unsigned long count)
2139 {
2140         unsigned block_start, block_end, blocksize;
2141         unsigned to;
2142         struct buffer_head *bh, *head;
2143         int ret = 1;
2144
2145         if (!page_has_buffers(page))
2146                 return 0;
2147
2148         head = page_buffers(page);
2149         blocksize = head->b_size;
2150         to = min_t(unsigned, PAGE_CACHE_SIZE - from, count);
2151         to = from + to;
2152         if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
2153                 return 0;
2154
2155         bh = head;
2156         block_start = 0;
2157         do {
2158                 block_end = block_start + blocksize;
2159                 if (block_end > from && block_start < to) {
2160                         if (!buffer_uptodate(bh)) {
2161                                 ret = 0;
2162                                 break;
2163                         }
2164                         if (block_end >= to)
2165                                 break;
2166                 }
2167                 block_start = block_end;
2168                 bh = bh->b_this_page;
2169         } while (bh != head);
2170
2171         return ret;
2172 }
2173 EXPORT_SYMBOL(block_is_partially_uptodate);
2174
2175 /*
2176  * Generic "read page" function for block devices that have the normal
2177  * get_block functionality. This is most of the block device filesystems.
2178  * Reads the page asynchronously --- the unlock_buffer() and
2179  * set/clear_buffer_uptodate() functions propagate buffer state into the
2180  * page struct once IO has completed.
2181  */
2182 int block_read_full_page(struct page *page, get_block_t *get_block)
2183 {
2184         struct inode *inode = page->mapping->host;
2185         sector_t iblock, lblock;
2186         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2187         unsigned int blocksize, bbits;
2188         int nr, i;
2189         int fully_mapped = 1;
2190
2191         head = create_page_buffers(page, inode, 0);
2192         blocksize = head->b_size;
2193         bbits = block_size_bits(blocksize);
2194
2195         iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
2196         lblock = (i_size_read(inode)+blocksize-1) >> bbits;
2197         bh = head;
2198         nr = 0;
2199         i = 0;
2200
2201         do {
2202                 if (buffer_uptodate(bh))
2203                         continue;
2204
2205                 if (!buffer_mapped(bh)) {
2206                         int err = 0;
2207
2208                         fully_mapped = 0;
2209                         if (iblock < lblock) {
2210                                 WARN_ON(bh->b_size != blocksize);
2211                                 err = get_block(inode, iblock, bh, 0);
2212                                 if (err)
2213                                         SetPageError(page);
2214                         }
2215                         if (!buffer_mapped(bh)) {
2216                                 zero_user(page, i * blocksize, blocksize);
2217                                 if (!err)
2218                                         set_buffer_uptodate(bh);
2219                                 continue;
2220                         }
2221                         /*
2222                          * get_block() might have updated the buffer
2223                          * synchronously
2224                          */
2225                         if (buffer_uptodate(bh))
2226                                 continue;
2227                 }
2228                 arr[nr++] = bh;
2229         } while (i++, iblock++, (bh = bh->b_this_page) != head);
2230
2231         if (fully_mapped)
2232                 SetPageMappedToDisk(page);
2233
2234         if (!nr) {
2235                 /*
2236                  * All buffers are uptodate - we can set the page uptodate
2237                  * as well. But not if get_block() returned an error.
2238                  */
2239                 if (!PageError(page))
2240                         SetPageUptodate(page);
2241                 unlock_page(page);
2242                 return 0;
2243         }
2244
2245         /* Stage two: lock the buffers */
2246         for (i = 0; i < nr; i++) {
2247                 bh = arr[i];
2248                 lock_buffer(bh);
2249                 mark_buffer_async_read(bh);
2250         }
2251
2252         /*
2253          * Stage 3: start the IO.  Check for uptodateness
2254          * inside the buffer lock in case another process reading
2255          * the underlying blockdev brought it uptodate (the sct fix).
2256          */
2257         for (i = 0; i < nr; i++) {
2258                 bh = arr[i];
2259                 if (buffer_uptodate(bh))
2260                         end_buffer_async_read(bh, 1);
2261                 else
2262                         submit_bh(READ, bh);
2263         }
2264         return 0;
2265 }
2266 EXPORT_SYMBOL(block_read_full_page);
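/*
 * Example (hedged sketch): the classic one-line ->readpage of a
 * buffer-backed filesystem, with "example_get_block" standing in for the
 * filesystem's real get_block_t.
 */
static int example_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, example_get_block);
}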
2267
2268 /* Utility function for filesystems that need to do work on expanding
2269  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2270  * deal with the hole.
2271  */
2272 int generic_cont_expand_simple(struct inode *inode, loff_t size)
2273 {
2274         struct address_space *mapping = inode->i_mapping;
2275         struct page *page;
2276         void *fsdata;
2277         int err;
2278
2279         err = inode_newsize_ok(inode, size);
2280         if (err)
2281                 goto out;
2282
2283         err = pagecache_write_begin(NULL, mapping, size, 0,
2284                                 AOP_FLAG_UNINTERRUPTIBLE|AOP_FLAG_CONT_EXPAND,
2285                                 &page, &fsdata);
2286         if (err)
2287                 goto out;
2288
2289         err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata);
2290         BUG_ON(err > 0);
2291
2292 out:
2293         return err;
2294 }
2295 EXPORT_SYMBOL(generic_cont_expand_simple);
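/*
 * Example (hedged sketch): a ->setattr path on a filesystem that cannot
 * represent holes might expand the file through the pagecache before
 * accepting the new size; "example_setattr_expand" is hypothetical.
 */
static int example_setattr_expand(struct inode *inode, loff_t newsize)
{
        if (newsize <= i_size_read(inode))
                return 0;
        /* zero-fills via pagecache_write_begin/end as implemented above */
        return generic_cont_expand_simple(inode, newsize);
}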
2296
2297 static int cont_expand_zero(struct file *file, struct address_space *mapping,
2298                             loff_t pos, loff_t *bytes)
2299 {
2300         struct inode *inode = mapping->host;
2301         unsigned blocksize = 1 << inode->i_blkbits;
2302         struct page *page;
2303         void *fsdata;
2304         pgoff_t index, curidx;
2305         loff_t curpos;
2306         unsigned zerofrom, offset, len;
2307         int err = 0;
2308
2309         index = pos >> PAGE_CACHE_SHIFT;
2310         offset = pos & ~PAGE_CACHE_MASK;
2311
2312         while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) {
2313                 zerofrom = curpos & ~PAGE_CACHE_MASK;
2314                 if (zerofrom & (blocksize-1)) {
2315                         *bytes |= (blocksize-1);
2316                         (*bytes)++;
2317                 }
2318                 len = PAGE_CACHE_SIZE - zerofrom;
2319
2320                 err = pagecache_write_begin(file, mapping, curpos, len,
2321                                                 AOP_FLAG_UNINTERRUPTIBLE,
2322                                                 &page, &fsdata);
2323                 if (err)
2324                         goto out;
2325                 zero_user(page, zerofrom, len);
2326                 err = pagecache_write_end(file, mapping, curpos, len, len,
2327                                                 page, fsdata);
2328                 if (err < 0)
2329                         goto out;
2330                 BUG_ON(err != len);
2331                 err = 0;
2332
2333                 balance_dirty_pages_ratelimited(mapping);
2334
2335                 if (unlikely(fatal_signal_pending(current))) {
2336                         err = -EINTR;
2337                         goto out;
2338                 }
2339         }
2340
2341         /* page covers the boundary, find the boundary offset */
2342         if (index == curidx) {
2343                 zerofrom = curpos & ~PAGE_CACHE_MASK;
2344                 /* if we expand the file, the last block will be filled */
2345                 if (offset <= zerofrom) {
2346                         goto out;
2347                 }
2348                 if (zerofrom & (blocksize-1)) {
2349                         *bytes |= (blocksize-1);
2350                         (*bytes)++;
2351                 }
2352                 len = offset - zerofrom;
2353
2354                 err = pagecache_write_begin(file, mapping, curpos, len,
2355                                                 AOP_FLAG_UNINTERRUPTIBLE,
2356                                                 &page, &fsdata);
2357                 if (err)
2358                         goto out;
2359                 zero_user(page, zerofrom, len);
2360                 err = pagecache_write_end(file, mapping, curpos, len, len,
2361                                                 page, fsdata);
2362                 if (err < 0)
2363                         goto out;
2364                 BUG_ON(err != len);
2365                 err = 0;
2366         }
2367 out:
2368         return err;
2369 }
2370
2371 /*
2372  * For moronic filesystems that do not allow holes in files.
2373  * We may have to extend the file.
2374  */
2375 int cont_write_begin(struct file *file, struct address_space *mapping,
2376                         loff_t pos, unsigned len, unsigned flags,
2377                         struct page **pagep, void **fsdata,
2378                         get_block_t *get_block, loff_t *bytes)
2379 {
2380         struct inode *inode = mapping->host;
2381         unsigned blocksize = 1 << inode->i_blkbits;
2382         unsigned zerofrom;
2383         int err;
2384
2385         err = cont_expand_zero(file, mapping, pos, bytes);
2386         if (err)
2387                 return err;
2388
2389         zerofrom = *bytes & ~PAGE_CACHE_MASK;
2390         if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2391                 *bytes |= (blocksize-1);
2392                 (*bytes)++;
2393         }
2394
2395         return block_write_begin(mapping, pos, len, flags, pagep, get_block);
2396 }
2397 EXPORT_SYMBOL(cont_write_begin);
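/*
 * Example (hedged sketch): a filesystem that cannot represent holes keeps
 * a per-inode "bytes instantiated so far" counter and hands its address to
 * cont_write_begin() so the gap below the write position gets zero-filled
 * first.  EXAMPLE_I() is a hypothetical per-inode info accessor and
 * "example_get_block" the hypothetical mapping routine.
 */
static int example_cont_write_begin(struct file *file,
                                    struct address_space *mapping,
                                    loff_t pos, unsigned len, unsigned flags,
                                    struct page **pagep, void **fsdata)
{
        *pagep = NULL;
        return cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                example_get_block,
                                /* hypothetical per-inode counter: */
                                &EXAMPLE_I(mapping->host)->i_allocated);
}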
2398
2399 int block_commit_write(struct page *page, unsigned from, unsigned to)
2400 {
2401         struct inode *inode = page->mapping->host;
2402         __block_commit_write(inode, page, from, to);
2403         return 0;
2404 }
2405 EXPORT_SYMBOL(block_commit_write);
2406
2407 /*
2408  * block_page_mkwrite() is not allowed to change the file size as it gets
2409  * called from a page fault handler when a page is first dirtied. Hence we must
2410  * be careful to check for EOF conditions here. We set the page up correctly
2411  * for a written page which means we get ENOSPC checking when writing into
2412  * holes and correct delalloc and unwritten extent mapping on filesystems that
2413  * support these features.
2414  *
2415  * We are not allowed to take the i_mutex here so we have to play games to
2416  * protect against truncate races as the page could now be beyond EOF.  Because
2417  * truncate writes the inode size before removing pages, once we have the
2418  * page lock we can determine safely if the page is beyond EOF. If it is not
2419  * beyond EOF, then the page is guaranteed safe against truncation until we
2420  * unlock the page.
2421  *
2422  * Direct callers of this function should protect against filesystem freezing
2423  * using sb_start_pagefault() - sb_end_pagefault() functions.
2424  */
2425 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2426                          get_block_t get_block)
2427 {
2428         struct page *page = vmf->page;
2429         struct inode *inode = file_inode(vma->vm_file);
2430         unsigned long end;
2431         loff_t size;
2432         int ret;
2433
2434         lock_page(page);
2435         size = i_size_read(inode);
2436         if ((page->mapping != inode->i_mapping) ||
2437             (page_offset(page) > size)) {
2438                 /* We overload EFAULT to mean page got truncated */
2439                 ret = -EFAULT;
2440                 goto out_unlock;
2441         }
2442
2443         /* page is wholly or partially inside EOF */
2444         if (((page->index + 1) << PAGE_CACHE_SHIFT) > size)
2445                 end = size & ~PAGE_CACHE_MASK;
2446         else
2447                 end = PAGE_CACHE_SIZE;
2448
2449         ret = __block_write_begin(page, 0, end, get_block);
2450         if (!ret)
2451                 ret = block_commit_write(page, 0, end);
2452
2453         if (unlikely(ret < 0))
2454                 goto out_unlock;
2455         set_page_dirty(page);
2456         wait_for_stable_page(page);
2457         return 0;
2458 out_unlock:
2459         unlock_page(page);
2460         return ret;
2461 }
2462 EXPORT_SYMBOL(block_page_mkwrite);
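/*
 * Example (hedged sketch): a direct caller wraps the helper in the freeze
 * protection requested by the comment above and converts the errno to a
 * VM_FAULT_* code; block_page_mkwrite_return() is the stock helper from
 * <linux/buffer_head.h>, "example_get_block" is hypothetical.
 */
static int example_page_mkwrite(struct vm_area_struct *vma,
                                struct vm_fault *vmf)
{
        struct super_block *sb = file_inode(vma->vm_file)->i_sb;
        int ret;

        sb_start_pagefault(sb);
        ret = block_page_mkwrite(vma, vmf, example_get_block);
        sb_end_pagefault(sb);
        return block_page_mkwrite_return(ret);
}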
2463
2464 /*
2465  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
2466  * immediately, while under the page lock.  So it needs a special end_io
2467  * handler which does not touch the bh after unlocking it.
2468  */
2469 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2470 {
2471         __end_buffer_read_notouch(bh, uptodate);
2472 }
2473
2474 /*
2475  * Attach the singly-linked list of buffers created by nobh_write_begin, to
2476  * the page (converting it to circular linked list and taking care of page
2477  * dirty races).
2478  */
2479 static void attach_nobh_buffers(struct page *page, struct buffer_head *head)
2480 {
2481         struct buffer_head *bh;
2482
2483         BUG_ON(!PageLocked(page));
2484
2485         spin_lock(&page->mapping->private_lock);
2486         bh = head;
2487         do {
2488                 if (PageDirty(page))
2489                         set_buffer_dirty(bh);
2490                 if (!bh->b_this_page)
2491                         bh->b_this_page = head;
2492                 bh = bh->b_this_page;
2493         } while (bh != head);
2494         attach_page_buffers(page, head);
2495         spin_unlock(&page->mapping->private_lock);
2496 }
2497
2498 /*
2499  * On entry, the page is not uptodate anywhere.
2500  * On exit, the page is fully uptodate in the areas outside (from,to).
2501  * The filesystem needs to handle block truncation upon failure.
2502  */
2503 int nobh_write_begin(struct address_space *mapping,
2504                         loff_t pos, unsigned len, unsigned flags,
2505                         struct page **pagep, void **fsdata,
2506                         get_block_t *get_block)
2507 {
2508         struct inode *inode = mapping->host;
2509         const unsigned blkbits = inode->i_blkbits;
2510         const unsigned blocksize = 1 << blkbits;
2511         struct buffer_head *head, *bh;
2512         struct page *page;
2513         pgoff_t index;
2514         unsigned from, to;
2515         unsigned block_in_page;
2516         unsigned block_start, block_end;
2517         sector_t block_in_file;
2518         int nr_reads = 0;
2519         int ret = 0;
2520         int is_mapped_to_disk = 1;
2521
2522         index = pos >> PAGE_CACHE_SHIFT;
2523         from = pos & (PAGE_CACHE_SIZE - 1);
2524         to = from + len;
2525
2526         page = grab_cache_page_write_begin(mapping, index, flags);
2527         if (!page)
2528                 return -ENOMEM;
2529         *pagep = page;
2530         *fsdata = NULL;
2531
2532         if (page_has_buffers(page)) {
2533                 ret = __block_write_begin(page, pos, len, get_block);
2534                 if (unlikely(ret))
2535                         goto out_release;
2536                 return ret;
2537         }
2538
2539         if (PageMappedToDisk(page))
2540                 return 0;
2541
2542         /*
2543          * Allocate buffers so that we can keep track of state, and potentially
2544          * attach them to the page if an error occurs. In the common case of
2545          * no error, they will just be freed again without ever being attached
2546          * to the page (which is all OK, because we're under the page lock).
2547          *
2548          * Be careful: the buffer linked list is a NULL terminated one, rather
2549          * than the circular one we're used to.
2550          */
2551         head = alloc_page_buffers(page, blocksize, 0);
2552         if (!head) {
2553                 ret = -ENOMEM;
2554                 goto out_release;
2555         }
2556
2557         block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
2558
2559         /*
2560          * We loop across all blocks in the page, whether or not they are
2561          * part of the affected region.  This is so we can discover if the
2562          * page is fully mapped-to-disk.
2563          */
2564         for (block_start = 0, block_in_page = 0, bh = head;
2565                   block_start < PAGE_CACHE_SIZE;
2566                   block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2567                 int create;
2568
2569                 block_end = block_start + blocksize;
2570                 bh->b_state = 0;
2571                 create = 1;
2572                 if (block_start >= to)
2573                         create = 0;
2574                 ret = get_block(inode, block_in_file + block_in_page,
2575                                         bh, create);
2576                 if (ret)
2577                         goto failed;
2578                 if (!buffer_mapped(bh))
2579                         is_mapped_to_disk = 0;
2580                 if (buffer_new(bh))
2581                         unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
2582                 if (PageUptodate(page)) {
2583                         set_buffer_uptodate(bh);
2584                         continue;
2585                 }
2586                 if (buffer_new(bh) || !buffer_mapped(bh)) {
2587                         zero_user_segments(page, block_start, from,
2588                                                         to, block_end);
2589                         continue;
2590                 }
2591                 if (buffer_uptodate(bh))
2592                         continue;       /* reiserfs does this */
2593                 if (block_start < from || block_end > to) {
2594                         lock_buffer(bh);
2595                         bh->b_end_io = end_buffer_read_nobh;
2596                         submit_bh(READ, bh);
2597                         nr_reads++;
2598                 }
2599         }
2600
2601         if (nr_reads) {
2602                 /*
2603                  * The page is locked, so these buffers are protected from
2604                  * any VM or truncate activity.  Hence we don't need to care
2605                  * for the buffer_head refcounts.
2606                  */
2607                 for (bh = head; bh; bh = bh->b_this_page) {
2608                         wait_on_buffer(bh);
2609                         if (!buffer_uptodate(bh))
2610                                 ret = -EIO;
2611                 }
2612                 if (ret)
2613                         goto failed;
2614         }
2615
2616         if (is_mapped_to_disk)
2617                 SetPageMappedToDisk(page);
2618
2619         *fsdata = head; /* to be released by nobh_write_end */
2620
2621         return 0;
2622
2623 failed:
2624         BUG_ON(!ret);
2625         /*
2626          * Error recovery is a bit difficult. We need to zero out blocks that
2627          * were newly allocated, and dirty them to ensure they get written out.
2628          * Buffers need to be attached to the page at this point, otherwise
2629          * the handling of potential IO errors during writeout would be hard
2630          * (could try doing synchronous writeout, but what if that fails too?)
2631          */
2632         attach_nobh_buffers(page, head);
2633         page_zero_new_buffers(page, from, to);
2634
2635 out_release:
2636         unlock_page(page);
2637         page_cache_release(page);
2638         *pagep = NULL;
2639
2640         return ret;
2641 }
2642 EXPORT_SYMBOL(nobh_write_begin);
2643
2644 int nobh_write_end(struct file *file, struct address_space *mapping,
2645                         loff_t pos, unsigned len, unsigned copied,
2646                         struct page *page, void *fsdata)
2647 {
2648         struct inode *inode = page->mapping->host;
2649         struct buffer_head *head = fsdata;
2650         struct buffer_head *bh;
2651         BUG_ON(fsdata != NULL && page_has_buffers(page));
2652
2653         if (unlikely(copied < len) && head)
2654                 attach_nobh_buffers(page, head);
2655         if (page_has_buffers(page))
2656                 return generic_write_end(file, mapping, pos, len,
2657                                         copied, page, fsdata);
2658
2659         SetPageUptodate(page);
2660         set_page_dirty(page);
2661         if (pos+copied > inode->i_size) {
2662                 i_size_write(inode, pos+copied);
2663                 mark_inode_dirty(inode);
2664         }
2665
2666         unlock_page(page);
2667         page_cache_release(page);
2668
2669         while (head) {
2670                 bh = head;
2671                 head = head->b_this_page;
2672                 free_buffer_head(bh);
2673         }
2674
2675         return copied;
2676 }
2677 EXPORT_SYMBOL(nobh_write_end);
2678
2679 /*
2680  * nobh_writepage() - based on block_write_full_page() except
2681  * that it tries to operate without attaching bufferheads to
2682  * the page.
2683  */
2684 int nobh_writepage(struct page *page, get_block_t *get_block,
2685                         struct writeback_control *wbc)
2686 {
2687         struct inode * const inode = page->mapping->host;
2688         loff_t i_size = i_size_read(inode);
2689         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2690         unsigned offset;
2691         int ret;
2692
2693         /* Is the page fully inside i_size? */
2694         if (page->index < end_index)
2695                 goto out;
2696
2697         /* Is the page fully outside i_size? (truncate in progress) */
2698         offset = i_size & (PAGE_CACHE_SIZE-1);
2699         if (page->index >= end_index+1 || !offset) {
2700                 /*
2701                  * The page may have dirty, unmapped buffers.  For example,
2702                  * they may have been added in ext3_writepage().  Make them
2703                  * freeable here, so the page does not leak.
2704                  */
2705 #if 0
2706                 /* Not really sure about this  - do we need this ? */
2707                 if (page->mapping->a_ops->invalidatepage)
2708                         page->mapping->a_ops->invalidatepage(page, offset);
2709 #endif
2710                 unlock_page(page);
2711                 return 0; /* don't care */
2712         }
2713
2714         /*
2715          * The page straddles i_size.  It must be zeroed out on each and every
2716          * writepage invocation because it may be mmapped.  "A file is mapped
2717          * in multiples of the page size.  For a file that is not a multiple of
2718          * the  page size, the remaining memory is zeroed when mapped, and
2719          * writes to that region are not written out to the file."
2720          */
2721         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2722 out:
2723         ret = mpage_writepage(page, get_block, wbc);
2724         if (ret == -EAGAIN)
2725                 ret = __block_write_full_page(inode, page, get_block, wbc,
2726                                               end_buffer_async_write);
2727         return ret;
2728 }
2729 EXPORT_SYMBOL(nobh_writepage);
2730
2731 int nobh_truncate_page(struct address_space *mapping,
2732                         loff_t from, get_block_t *get_block)
2733 {
2734         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2735         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2736         unsigned blocksize;
2737         sector_t iblock;
2738         unsigned length, pos;
2739         struct inode *inode = mapping->host;
2740         struct page *page;
2741         struct buffer_head map_bh;
2742         int err;
2743
2744         blocksize = 1 << inode->i_blkbits;
2745         length = offset & (blocksize - 1);
2746
2747         /* Block boundary? Nothing to do */
2748         if (!length)
2749                 return 0;
2750
2751         length = blocksize - length;
2752         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2753
2754         page = grab_cache_page(mapping, index);
2755         err = -ENOMEM;
2756         if (!page)
2757                 goto out;
2758
2759         if (page_has_buffers(page)) {
2760 has_buffers:
2761                 unlock_page(page);
2762                 page_cache_release(page);
2763                 return block_truncate_page(mapping, from, get_block);
2764         }
2765
2766         /* Find the buffer that contains "offset" */
2767         pos = blocksize;
2768         while (offset >= pos) {
2769                 iblock++;
2770                 pos += blocksize;
2771         }
2772
2773         map_bh.b_size = blocksize;
2774         map_bh.b_state = 0;
2775         err = get_block(inode, iblock, &map_bh, 0);
2776         if (err)
2777                 goto unlock;
2778         /* unmapped? It's a hole - nothing to do */
2779         if (!buffer_mapped(&map_bh))
2780                 goto unlock;
2781
2782         /* Ok, it's mapped. Make sure it's up-to-date */
2783         if (!PageUptodate(page)) {
2784                 err = mapping->a_ops->readpage(NULL, page);
2785                 if (err) {
2786                         page_cache_release(page);
2787                         goto out;
2788                 }
2789                 lock_page(page);
2790                 if (!PageUptodate(page)) {
2791                         err = -EIO;
2792                         goto unlock;
2793                 }
2794                 if (page_has_buffers(page))
2795                         goto has_buffers;
2796         }
2797         zero_user(page, offset, length);
2798         set_page_dirty(page);
2799         err = 0;
2800
2801 unlock:
2802         unlock_page(page);
2803         page_cache_release(page);
2804 out:
2805         return err;
2806 }
2807 EXPORT_SYMBOL(nobh_truncate_page);
2808
2809 int block_truncate_page(struct address_space *mapping,
2810                         loff_t from, get_block_t *get_block)
2811 {
2812         pgoff_t index = from >> PAGE_CACHE_SHIFT;
2813         unsigned offset = from & (PAGE_CACHE_SIZE-1);
2814         unsigned blocksize;
2815         sector_t iblock;
2816         unsigned length, pos;
2817         struct inode *inode = mapping->host;
2818         struct page *page;
2819         struct buffer_head *bh;
2820         int err;
2821
2822         blocksize = 1 << inode->i_blkbits;
2823         length = offset & (blocksize - 1);
2824
2825         /* Block boundary? Nothing to do */
2826         if (!length)
2827                 return 0;
2828
2829         length = blocksize - length;
2830         iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
2831         
2832         page = grab_cache_page(mapping, index);
2833         err = -ENOMEM;
2834         if (!page)
2835                 goto out;
2836
2837         if (!page_has_buffers(page))
2838                 create_empty_buffers(page, blocksize, 0);
2839
2840         /* Find the buffer that contains "offset" */
2841         bh = page_buffers(page);
2842         pos = blocksize;
2843         while (offset >= pos) {
2844                 bh = bh->b_this_page;
2845                 iblock++;
2846                 pos += blocksize;
2847         }
2848
2849         err = 0;
2850         if (!buffer_mapped(bh)) {
2851                 WARN_ON(bh->b_size != blocksize);
2852                 err = get_block(inode, iblock, bh, 0);
2853                 if (err)
2854                         goto unlock;
2855                 /* unmapped? It's a hole - nothing to do */
2856                 if (!buffer_mapped(bh))
2857                         goto unlock;
2858         }
2859
2860         /* Ok, it's mapped. Make sure it's up-to-date */
2861         if (PageUptodate(page))
2862                 set_buffer_uptodate(bh);
2863
2864         if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2865                 err = -EIO;
2866                 ll_rw_block(READ, 1, &bh);
2867                 wait_on_buffer(bh);
2868                 /* Uhhuh. Read error. Complain and punt. */
2869                 if (!buffer_uptodate(bh))
2870                         goto unlock;
2871         }
2872
2873         zero_user(page, offset, length);
2874         mark_buffer_dirty(bh);
2875         err = 0;
2876
2877 unlock:
2878         unlock_page(page);
2879         page_cache_release(page);
2880 out:
2881         return err;
2882 }
2883 EXPORT_SYMBOL(block_truncate_page);
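/*
 * Example (hedged sketch): a shrinking-truncate path zeroes the partial
 * tail of the new last block with block_truncate_page() before updating
 * the size; the block-freeing step is filesystem-specific and only hinted
 * at here, "example_get_block" is again hypothetical.
 */
static int example_truncate(struct inode *inode, loff_t newsize)
{
        int err;

        err = block_truncate_page(inode->i_mapping, newsize,
                                  example_get_block);
        if (err)
                return err;
        truncate_setsize(inode, newsize);
        /* ... free the on-disk blocks beyond newsize here ... */
        return 0;
}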
2884
2885 /*
2886  * The generic ->writepage function for buffer-backed address_spaces
2887  */
2888 int block_write_full_page(struct page *page, get_block_t *get_block,
2889                         struct writeback_control *wbc)
2890 {
2891         struct inode * const inode = page->mapping->host;
2892         loff_t i_size = i_size_read(inode);
2893         const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
2894         unsigned offset;
2895
2896         /* Is the page fully inside i_size? */
2897         if (page->index < end_index)
2898                 return __block_write_full_page(inode, page, get_block, wbc,
2899                                                end_buffer_async_write);
2900
2901         /* Is the page fully outside i_size? (truncate in progress) */
2902         offset = i_size & (PAGE_CACHE_SIZE-1);
2903         if (page->index >= end_index+1 || !offset) {
2904                 /*
2905                  * The page may have dirty, unmapped buffers.  For example,
2906                  * they may have been added in ext3_writepage().  Make them
2907                  * freeable here, so the page does not leak.
2908                  */
2909                 do_invalidatepage(page, 0, PAGE_CACHE_SIZE);
2910                 unlock_page(page);
2911                 return 0; /* don't care */
2912         }
2913
2914         /*
2915          * The page straddles i_size.  It must be zeroed out on each and every
2916          * writepage invocation because it may be mmapped.  "A file is mapped
2917          * in multiples of the page size.  For a file that is not a multiple of
2918          * the  page size, the remaining memory is zeroed when mapped, and
2919          * writes to that region are not written out to the file."
2920          */
2921         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
2922         return __block_write_full_page(inode, page, get_block, wbc,
2923                                                         end_buffer_async_write);
2924 }
2925 EXPORT_SYMBOL(block_write_full_page);
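/*
 * Example (hedged sketch): the matching one-line ->writepage for a
 * buffer-backed filesystem, with the hypothetical "example_get_block".
 */
static int example_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, example_get_block, wbc);
}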
2926
2927 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2928                             get_block_t *get_block)
2929 {
2930         struct buffer_head tmp;
2931         struct inode *inode = mapping->host;
2932         tmp.b_state = 0;
2933         tmp.b_blocknr = 0;
2934         tmp.b_size = 1 << inode->i_blkbits;
2935         get_block(inode, block, &tmp, 0);
2936         return tmp.b_blocknr;
2937 }
2938 EXPORT_SYMBOL(generic_block_bmap);
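
/*
 * generic_block_bmap() does a one-off, read-only mapping lookup, so a
 * filesystem's ->bmap can usually just forward to it.  A sketch
 * ("myfs_get_block" is again a stand-in):
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *				  sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */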
2939
2940 static void end_bio_bh_io_sync(struct bio *bio)
2941 {
2942         struct buffer_head *bh = bio->bi_private;
2943
2944         if (unlikely(bio_flagged(bio, BIO_QUIET)))
2945                 set_bit(BH_Quiet, &bh->b_state);
2946
2947         bh->b_end_io(bh, !bio->bi_error);
2948         bio_put(bio);
2949 }
2950
2951 /*
2952  * This allows us to do IO even on the odd last sectors
2953  * of a device, even if the block size is some multiple
2954  * of the physical sector size.
2955  *
2956  * We'll just truncate the bio to the size of the device,
2957  * and clear the end of the buffer head manually.
2958  *
2959  * Truly out-of-range accesses will turn into actual IO
2960  * errors; this only handles the "we need to be able to
2961  * do IO at the final sector" case.
2962  */
2963 void guard_bio_eod(int rw, struct bio *bio)
2964 {
2965         sector_t maxsector;
2966         struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
2967         unsigned truncated_bytes;
2968
2969         maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
2970         if (!maxsector)
2971                 return;
2972
2973         /*
2974          * If the *whole* IO is past the end of the device,
2975          * let it through, and the IO layer will turn it into
2976          * an EIO.
2977          */
2978         if (unlikely(bio->bi_iter.bi_sector >= maxsector))
2979                 return;
2980
2981         maxsector -= bio->bi_iter.bi_sector;
2982         if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
2983                 return;
2984
2985         /* Uhhuh. We've got a bio that straddles the device size! */
2986         truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
2987
2988         /* Truncate the bio.. */
2989         bio->bi_iter.bi_size -= truncated_bytes;
2990         bvec->bv_len -= truncated_bytes;
2991
2992         /* ..and clear the end of the buffer for reads */
2993         if ((rw & RW_MASK) == READ) {
2994                 zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len,
2995                                 truncated_bytes);
2996         }
2997 }
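
/*
 * Worked example with hypothetical numbers: a 4k buffer at sector 4 of
 * a 7-sector device (maxsector == 7).  The bio spans 4096 >> 9 == 8
 * sectors but only 3 remain, so truncated_bytes = 4096 - (3 << 9) =
 * 2560.  The bio is trimmed to the 1536 bytes that exist on the
 * device, and for a READ the final 2560 bytes of the buffer are
 * zero-filled instead of read.
 */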
2998
2999 static int submit_bh_wbc(int rw, struct buffer_head *bh,
3000                          unsigned long bio_flags, struct writeback_control *wbc)
3001 {
3002         struct bio *bio;
3003
3004         BUG_ON(!buffer_locked(bh));
3005         BUG_ON(!buffer_mapped(bh));
3006         BUG_ON(!bh->b_end_io);
3007         BUG_ON(buffer_delay(bh));
3008         BUG_ON(buffer_unwritten(bh));
3009
3010         /*
3011          * Only clear out a write error when rewriting
3012          */
3013         if (test_set_buffer_req(bh) && (rw & WRITE))
3014                 clear_buffer_write_io_error(bh);
3015
3016         /*
3017          * from here on down, it's all bio -- do the initial mapping,
3018          * submit_bio -> generic_make_request may further map this bio around
3019          */
3020         bio = bio_alloc(GFP_NOIO, 1);
3021
3022         if (wbc) {
3023                 wbc_init_bio(wbc, bio);
3024                 wbc_account_io(wbc, bh->b_page, bh->b_size);
3025         }
3026
3027         bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
3028         bio->bi_bdev = bh->b_bdev;
3029
3030         bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
3031         BUG_ON(bio->bi_iter.bi_size != bh->b_size);
3032
3033         bio->bi_end_io = end_bio_bh_io_sync;
3034         bio->bi_private = bh;
3035         bio->bi_flags |= bio_flags;
3036
3037         /* Take care of bh's that straddle the end of the device */
3038         guard_bio_eod(rw, bio);
3039
3040         if (buffer_meta(bh))
3041                 rw |= REQ_META;
3042         if (buffer_prio(bh))
3043                 rw |= REQ_PRIO;
3044
3045         submit_bio(rw, bio);
3046         return 0;
3047 }
3048
3049 int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
3050 {
3051         return submit_bh_wbc(rw, bh, bio_flags, NULL);
3052 }
3053 EXPORT_SYMBOL_GPL(_submit_bh);
3054
3055 int submit_bh(int rw, struct buffer_head *bh)
3056 {
3057         return submit_bh_wbc(rw, bh, 0, NULL);
3058 }
3059 EXPORT_SYMBOL(submit_bh);
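
/*
 * submit_bh() expects a locked, mapped buffer with b_end_io set, and
 * the completion handler consumes one reference.  A minimal
 * synchronous read sketch (essentially what bh_submit_read() below
 * does):
 *
 *	lock_buffer(bh);
 *	get_bh(bh);
 *	bh->b_end_io = end_buffer_read_sync;
 *	submit_bh(READ, bh);
 *	wait_on_buffer(bh);
 *	if (!buffer_uptodate(bh))
 *		return -EIO;
 */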
3060
3061 /**
3062  * ll_rw_block: low-level access to block devices (DEPRECATED)
3063  * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
3064  * @nr: number of &struct buffer_heads in the array
3065  * @bhs: array of pointers to &struct buffer_head
3066  *
3067  * ll_rw_block() takes an array of pointers to &struct buffer_heads, and
3068  * requests an I/O operation on them, either a %READ or a %WRITE.  The third
3069  * %READA option is described in the documentation for generic_make_request(),
3070  * which ll_rw_block() calls.
3071  *
3072  * This function drops any buffer that it cannot get a lock on (with the
3073  * BH_Lock state bit), any buffer that appears to be clean when doing a write
3074  * request, and any buffer that appears to be up-to-date when doing a read
3075  * request.  Further, it marks as clean buffers that are processed for
3076  * writing (the buffer cache won't assume that they are actually clean
3077  * until the buffer gets unlocked).
3078  *
3079  * ll_rw_block sets b_end_io to a simple completion handler that marks
3080  * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
3081  * any waiters.
3082  *
3083  * All of the buffers must be for the same device, and must also be a
3084  * multiple of the current approved size for the device.
3085  */
3086 void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
3087 {
3088         int i;
3089
3090         for (i = 0; i < nr; i++) {
3091                 struct buffer_head *bh = bhs[i];
3092
3093                 if (!trylock_buffer(bh))
3094                         continue;
3095                 if (rw == WRITE) {
3096                         if (test_clear_buffer_dirty(bh)) {
3097                                 bh->b_end_io = end_buffer_write_sync;
3098                                 get_bh(bh);
3099                                 submit_bh(WRITE, bh);
3100                                 continue;
3101                         }
3102                 } else {
3103                         if (!buffer_uptodate(bh)) {
3104                                 bh->b_end_io = end_buffer_read_sync;
3105                                 get_bh(bh);
3106                                 submit_bh(rw, bh);
3107                                 continue;
3108                         }
3109                 }
3110                 unlock_buffer(bh);
3111         }
3112 }
3113 EXPORT_SYMBOL(ll_rw_block);
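
/*
 * A typical ll_rw_block() batch-read pattern, assuming the caller
 * already holds references on "nr" mapped buffers in "bhs":
 *
 *	int i, err = 0;
 *
 *	ll_rw_block(READ, nr, bhs);
 *	for (i = 0; i < nr; i++) {
 *		wait_on_buffer(bhs[i]);
 *		if (!buffer_uptodate(bhs[i]))
 *			err = -EIO;
 *	}
 */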
3114
3115 void write_dirty_buffer(struct buffer_head *bh, int rw)
3116 {
3117         lock_buffer(bh);
3118         if (!test_clear_buffer_dirty(bh)) {
3119                 unlock_buffer(bh);
3120                 return;
3121         }
3122         bh->b_end_io = end_buffer_write_sync;
3123         get_bh(bh);
3124         submit_bh(rw, bh);
3125 }
3126 EXPORT_SYMBOL(write_dirty_buffer);
3127
3128 /*
3129  * For a data-integrity writeout, we need to wait upon any in-progress I/O
3130  * and then start new I/O and then wait upon it.  The caller must have a ref on
3131  * the buffer_head.
3132  */
3133 int __sync_dirty_buffer(struct buffer_head *bh, int rw)
3134 {
3135         int ret = 0;
3136
3137         WARN_ON(atomic_read(&bh->b_count) < 1);
3138         lock_buffer(bh);
3139         if (test_clear_buffer_dirty(bh)) {
3140                 get_bh(bh);
3141                 bh->b_end_io = end_buffer_write_sync;
3142                 ret = submit_bh(rw, bh);
3143                 wait_on_buffer(bh);
3144                 if (!ret && !buffer_uptodate(bh))
3145                         ret = -EIO;
3146         } else {
3147                 unlock_buffer(bh);
3148         }
3149         return ret;
3150 }
3151 EXPORT_SYMBOL(__sync_dirty_buffer);
3152
3153 int sync_dirty_buffer(struct buffer_head *bh)
3154 {
3155         return __sync_dirty_buffer(bh, WRITE_SYNC);
3156 }
3157 EXPORT_SYMBOL(sync_dirty_buffer);
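
/*
 * Usage sketch: dirty a metadata buffer and push it to disk
 * synchronously, as a filesystem might when updating its superblock
 * ("bh" is assumed to carry the caller's reference):
 *
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	if (err)
 *		return err;
 */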
3158
3159 /*
3160  * try_to_free_buffers() checks if all the buffers on this particular page
3161  * are unused, and releases them if so.
3162  *
3163  * Exclusion against try_to_free_buffers may be obtained by either
3164  * locking the page or by holding its mapping's private_lock.
3165  *
3166  * If the page is dirty but all the buffers are clean then we need to
3167  * be sure to mark the page clean as well.  This is because the page
3168  * may be against a block device, and a later reattachment of buffers
3169  * to a dirty page will set *all* buffers dirty.  Which would corrupt
3170  * filesystem data on the same device.
3171  *
3172  * The same applies to regular filesystem pages: if all the buffers are
3173  * clean then we set the page clean and proceed.  To do that, we require
3174  * total exclusion from __set_page_dirty_buffers().  That is obtained with
3175  * private_lock.
3176  *
3177  * try_to_free_buffers() is non-blocking.
3178  */
3179 static inline int buffer_busy(struct buffer_head *bh)
3180 {
3181         return atomic_read(&bh->b_count) |
3182                 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3183 }
3184
3185 static int
3186 drop_buffers(struct page *page, struct buffer_head **buffers_to_free)
3187 {
3188         struct buffer_head *head = page_buffers(page);
3189         struct buffer_head *bh;
3190
3191         bh = head;
3192         do {
3193                 if (buffer_write_io_error(bh) && page->mapping)
3194                         set_bit(AS_EIO, &page->mapping->flags);
3195                 if (buffer_busy(bh))
3196                         goto failed;
3197                 bh = bh->b_this_page;
3198         } while (bh != head);
3199
3200         do {
3201                 struct buffer_head *next = bh->b_this_page;
3202
3203                 if (bh->b_assoc_map)
3204                         __remove_assoc_queue(bh);
3205                 bh = next;
3206         } while (bh != head);
3207         *buffers_to_free = head;
3208         __clear_page_buffers(page);
3209         return 1;
3210 failed:
3211         return 0;
3212 }
3213
3214 int try_to_free_buffers(struct page *page)
3215 {
3216         struct address_space * const mapping = page->mapping;
3217         struct buffer_head *buffers_to_free = NULL;
3218         int ret = 0;
3219
3220         BUG_ON(!PageLocked(page));
3221         if (PageWriteback(page))
3222                 return 0;
3223
3224         if (mapping == NULL) {          /* can this still happen? */
3225                 ret = drop_buffers(page, &buffers_to_free);
3226                 goto out;
3227         }
3228
3229         spin_lock(&mapping->private_lock);
3230         ret = drop_buffers(page, &buffers_to_free);
3231
3232         /*
3233          * If the filesystem writes its buffers by hand (eg ext3)
3234          * then we can have clean buffers against a dirty page.  We
3235          * clean the page here; otherwise the VM will never notice
3236          * that the filesystem did any IO at all.
3237          *
3238          * Also, during truncate, discard_buffer will have marked all
3239          * the page's buffers clean.  We discover that here and clean
3240          * the page also.
3241          *
3242          * private_lock must be held over this entire operation in order
3243          * to synchronise against __set_page_dirty_buffers and prevent the
3244          * dirty bit from being lost.
3245          */
3246         if (ret)
3247                 cancel_dirty_page(page);
3248         spin_unlock(&mapping->private_lock);
3249 out:
3250         if (buffers_to_free) {
3251                 struct buffer_head *bh = buffers_to_free;
3252
3253                 do {
3254                         struct buffer_head *next = bh->b_this_page;
3255                         free_buffer_head(bh);
3256                         bh = next;
3257                 } while (bh != buffers_to_free);
3258         }
3259         return ret;
3260 }
3261 EXPORT_SYMBOL(try_to_free_buffers);
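
/*
 * Filesystems with no extra buffer bookkeeping can point ->releasepage
 * straight at this.  A sketch:
 *
 *	static int myfs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		return try_to_free_buffers(page);
 *	}
 */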
3262
3263 /*
3264  * There are no bdflush tunables left.  But distributions are
3265  * still running obsolete flush daemons, so we terminate them here.
3266  *
3267  * Use of bdflush() is deprecated and will be removed in a future kernel.
3268  * The `flush-X' kernel threads fully replace bdflush daemons and this call.
3269  */
3270 SYSCALL_DEFINE2(bdflush, int, func, long, data)
3271 {
3272         static int msg_count;
3273
3274         if (!capable(CAP_SYS_ADMIN))
3275                 return -EPERM;
3276
3277         if (msg_count < 5) {
3278                 msg_count++;
3279                 printk(KERN_INFO
3280                         "warning: process `%s' used the obsolete bdflush"
3281                         " system call\n", current->comm);
3282                 printk(KERN_INFO "Fix your initscripts?\n");
3283         }
3284
3285         if (func == 1)
3286                 do_exit(0);
3287         return 0;
3288 }
3289
3290 /*
3291  * Buffer-head allocation
3292  */
3293 static struct kmem_cache *bh_cachep __read_mostly;
3294
3295 /*
3296  * Once the number of bh's in the machine exceeds this level, we start
3297  * stripping them in writeback.
3298  */
3299 static unsigned long max_buffer_heads;
3300
3301 int buffer_heads_over_limit;
3302
3303 struct bh_accounting {
3304         int nr;                 /* Number of live bh's */
3305         int ratelimit;          /* Limit cacheline bouncing */
3306 };
3307
3308 static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3309
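/*
 * Fold the per-CPU counts into the global over-limit flag, but only on
 * every 4096th call, so the hot alloc/free paths don't pay for a
 * for_each_online_cpu() walk each time.
 */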
3310 static void recalc_bh_state(void)
3311 {
3312         int i;
3313         int tot = 0;
3314
3315         if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3316                 return;
3317         __this_cpu_write(bh_accounting.ratelimit, 0);
3318         for_each_online_cpu(i)
3319                 tot += per_cpu(bh_accounting, i).nr;
3320         buffer_heads_over_limit = (tot > max_buffer_heads);
3321 }
3322
3323 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3324 {
3325         struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3326         if (ret) {
3327                 INIT_LIST_HEAD(&ret->b_assoc_buffers);
3328                 preempt_disable();
3329                 __this_cpu_inc(bh_accounting.nr);
3330                 recalc_bh_state();
3331                 preempt_enable();
3332         }
3333         return ret;
3334 }
3335 EXPORT_SYMBOL(alloc_buffer_head);
3336
3337 void free_buffer_head(struct buffer_head *bh)
3338 {
3339         BUG_ON(!list_empty(&bh->b_assoc_buffers));
3340         kmem_cache_free(bh_cachep, bh);
3341         preempt_disable();
3342         __this_cpu_dec(bh_accounting.nr);
3343         recalc_bh_state();
3344         preempt_enable();
3345 }
3346 EXPORT_SYMBOL(free_buffer_head);
3347
3348 static void buffer_exit_cpu(int cpu)
3349 {
3350         int i;
3351         struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3352
3353         for (i = 0; i < BH_LRU_SIZE; i++) {
3354                 brelse(b->bhs[i]);
3355                 b->bhs[i] = NULL;
3356         }
3357         this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3358         per_cpu(bh_accounting, cpu).nr = 0;
3359 }
3360
3361 static int buffer_cpu_notify(struct notifier_block *self,
3362                               unsigned long action, void *hcpu)
3363 {
3364         if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
3365                 buffer_exit_cpu((unsigned long)hcpu);
3366         return NOTIFY_OK;
3367 }
3368
3369 /**
3370  * bh_uptodate_or_lock - Test whether the buffer is uptodate
3371  * @bh: struct buffer_head
3372  *
3373  * Returns 1 if the buffer is up-to-date, or 0 with the
3374  * buffer locked if it is not.
3375  */
3376 int bh_uptodate_or_lock(struct buffer_head *bh)
3377 {
3378         if (!buffer_uptodate(bh)) {
3379                 lock_buffer(bh);
3380                 if (!buffer_uptodate(bh))
3381                         return 0;
3382                 unlock_buffer(bh);
3383         }
3384         return 1;
3385 }
3386 EXPORT_SYMBOL(bh_uptodate_or_lock);
3387
3388 /**
3389  * bh_submit_read - Submit a locked buffer for reading
3390  * @bh: struct buffer_head
3391  *
3392  * Returns zero on success and -EIO on error.
3393  */
3394 int bh_submit_read(struct buffer_head *bh)
3395 {
3396         BUG_ON(!buffer_locked(bh));
3397
3398         if (buffer_uptodate(bh)) {
3399                 unlock_buffer(bh);
3400                 return 0;
3401         }
3402
3403         get_bh(bh);
3404         bh->b_end_io = end_buffer_read_sync;
3405         submit_bh(READ, bh);
3406         wait_on_buffer(bh);
3407         if (buffer_uptodate(bh))
3408                 return 0;
3409         return -EIO;
3410 }
3411 EXPORT_SYMBOL(bh_submit_read);
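
/*
 * bh_uptodate_or_lock() and bh_submit_read() pair up into the common
 * "read it in unless it is already cached" idiom; both paths return
 * with the buffer unlocked:
 *
 *	if (!bh_uptodate_or_lock(bh)) {
 *		err = bh_submit_read(bh);
 *		if (err)
 *			return err;
 *	}
 */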
3412
3413 void __init buffer_init(void)
3414 {
3415         unsigned long nrpages;
3416
3417         bh_cachep = kmem_cache_create("buffer_head",
3418                         sizeof(struct buffer_head), 0,
3419                                 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3420                                 SLAB_MEM_SPREAD),
3421                                 NULL);
3422
3423         /*
3424          * Limit the bh occupancy to 10% of ZONE_NORMAL
3425          */
3426         nrpages = (nr_free_buffer_pages() * 10) / 100;
3427         max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3428         hotcpu_notifier(buffer_cpu_notify, 0);
3429 }
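
/*
 * Worked example with hypothetical numbers: on a 64-bit build where
 * struct buffer_head is roughly 104 bytes, PAGE_SIZE / sizeof(struct
 * buffer_head) is about 39.  With 1,000,000 free buffer pages the 10%
 * cap gives nrpages = 100,000, so max_buffer_heads lands near 3.9
 * million before buffer_heads_over_limit can trip.
 */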