/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
        void *val;

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        rcu_read_unlock();

        return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

        wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
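
/*
 * Example (illustrative sketch, not part of this file): before invalidating
 * or releasing a page, a netfs would normally go through the
 * fscache_check_page_write()/fscache_wait_on_page_write() wrappers from
 * linux/fscache.h, which check cookie validity before calling the
 * __fscache_* functions above.  The helper below is hypothetical:
 *
 *	static void example_wait_on_page_write(struct fscache_cookie *cookie,
 *					       struct page *page)
 *	{
 *		if (PageFsCache(page) && fscache_check_page_write(cookie, page))
 *			fscache_wait_on_page_write(cookie, page);
 *	}
 */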

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct page *xpage;
        void *val;

        _enter("%p,%p,%x", cookie, page, gfp);

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        if (!val) {
                rcu_read_unlock();
                fscache_stat(&fscache_n_store_vmscan_not_storing);
                __fscache_uncache_page(cookie, page);
                return true;
        }

        /* see if the page is actually undergoing storage - if so we can't get
         * rid of it till the cache has finished with it */
        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                rcu_read_unlock();
                goto page_busy;
        }

        /* the page is pending storage, so we attempt to cancel the store and
         * discard the store request so that the page can be reclaimed */
        spin_lock(&cookie->stores_lock);
        rcu_read_unlock();

        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                /* the page started to undergo storage whilst we were looking,
                 * so now we can only wait or return */
                spin_unlock(&cookie->stores_lock);
                goto page_busy;
        }

        xpage = radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);

        if (xpage) {
                fscache_stat(&fscache_n_store_vmscan_cancelled);
                fscache_stat(&fscache_n_store_radix_deletes);
                ASSERTCMP(xpage, ==, page);
        } else {
                fscache_stat(&fscache_n_store_vmscan_gone);
        }

        wake_up_bit(&cookie->flags, 0);
        if (xpage)
                page_cache_release(xpage);
        __fscache_uncache_page(cookie, page);
        return true;

page_busy:
        /* we might want to wait here, but that could deadlock the allocator as
         * the work threads writing to the cache may all end up sleeping
         * on memory allocation */
        fscache_stat(&fscache_n_store_vmscan_busy);
        return false;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
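
/*
 * Example (illustrative sketch): a netfs hooks this up via the
 * fscache_maybe_release_page() wrapper from its ->releasepage() address
 * space operation, so that VM reclaim can cancel a still-pending store
 * rather than block on it.  nfs_fscache_release_page() follows this
 * pattern; a simplified, hypothetical version looks like:
 *
 *	static int example_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		if (PageFsCache(page)) {
 *			struct fscache_cookie *cookie = example_cookie(page);
 *
 *			if (!fscache_maybe_release_page(cookie, page, gfp))
 *				return 0;
 *		}
 *		return 1;
 *	}
 */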

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
                                   struct page *page)
{
        struct fscache_cookie *cookie;
        struct page *xpage = NULL;

        spin_lock(&object->lock);
        cookie = object->cookie;
        if (cookie) {
                /* delete the page from the tree if it is now no longer
                 * pending */
                spin_lock(&cookie->stores_lock);
                radix_tree_tag_clear(&cookie->stores, page->index,
                                     FSCACHE_COOKIE_STORING_TAG);
                if (!radix_tree_tag_get(&cookie->stores, page->index,
                                        FSCACHE_COOKIE_PENDING_TAG)) {
                        fscache_stat(&fscache_n_store_radix_deletes);
                        xpage = radix_tree_delete(&cookie->stores, page->index);
                }
                spin_unlock(&cookie->stores_lock);
                wake_up_bit(&cookie->flags, 0);
        }
        spin_unlock(&object->lock);
        if (xpage)
                page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
        struct fscache_object *object = op->object;
        int ret;

        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

        fscache_stat(&fscache_n_attr_changed_calls);

        if (fscache_object_is_active(object)) {
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
                if (ret < 0)
                        fscache_abort_object(object);
        }

        fscache_op_complete(op);
        _leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;

        _enter("%p", cookie);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

        fscache_stat(&fscache_n_attr_changed);

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                fscache_stat(&fscache_n_attr_changed_nomem);
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        fscache_operation_init(op, fscache_attr_changed_op, NULL);
        op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;

nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
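
/*
 * Example (illustrative sketch): a netfs invokes this through the
 * fscache_attr_changed() wrapper after committing an attribute change -
 * typically a truncate or other size change - so the cache can resize its
 * backing storage to match.  The helper below is hypothetical:
 *
 *	static void example_setattr_committed(struct inode *inode,
 *					      struct fscache_cookie *cookie)
 *	{
 *		if (fscache_attr_changed(cookie) < 0)
 *			pr_debug("FS-Cache: attr change not honoured\n");
 *	}
 */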

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        _enter("{OP%x}", op->op.debug_id);

        ASSERTCMP(op->n_pages, ==, 0);

        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
                fscache_put_context(op->op.object->cookie, op->context);

        _leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
        struct address_space *mapping,
        fscache_rw_complete_t end_io_func,
        void *context)
{
        struct fscache_retrieval *op;

        /* allocate a retrieval operation for the caller to submit */
        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op) {
                fscache_stat(&fscache_n_retrievals_nomem);
                return NULL;
        }

        fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
        op->op.flags    = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
        op->mapping     = mapping;
        op->end_io_func = end_io_func;
        op->context     = context;
        op->start_time  = jiffies;
        INIT_LIST_HEAD(&op->to_do);
        return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
        unsigned long jif;

        _enter("");

        if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
                _leave(" = 0 [imm]");
                return 0;
        }

        fscache_stat(&fscache_n_retrievals_wait);

        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                fscache_stat(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
                return -ERESTARTSYS;
        }

        ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

        smp_rmb();
        fscache_hist(fscache_retrieval_delay_histogram, jif);
        _leave(" = 0 [dly]");
        return 0;
}

/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
                                                 struct fscache_retrieval *op,
                                                 atomic_t *stat_op_waits,
                                                 atomic_t *stat_object_dead)
{
        int ret;

        if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
                goto check_if_dead;

        _debug(">>> WT");
        fscache_stat(stat_op_waits);
        if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) < 0) {
                ret = fscache_cancel_op(&op->op);
                if (ret == 0)
                        return -ERESTARTSYS;

                /* it's been removed from the pending queue by another party,
                 * so we should get to run shortly */
                wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
        }
        _debug("<<< GO");

check_if_dead:
        if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
                fscache_stat(stat_object_dead);
                _leave(" = -ENOBUFS [cancelled]");
                return -ENOBUFS;
        }
        if (unlikely(fscache_object_is_dead(object))) {
                fscache_stat(stat_object_dead);
                return -ENOBUFS;
        }
        return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM    - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS   - no backing object available in which to cache the block
 *   -ENODATA   - no data available in the backing object for this block
 *   0          - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                                 struct page *page,
                                 fscache_rw_complete_t end_io_func,
                                 void *context,
                                 gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
        op->n_pages = 1;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_page);
                ret = object->cache->ops->allocate_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_page);
                if (ret == 0)
                        ret = -ENODATA;
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_page);
                ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
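
/*
 * Example (illustrative sketch): a netfs ->readpage() tries the cache first
 * through the fscache_read_or_alloc_page() wrapper and falls back to the
 * server on -ENOBUFS/-ENODATA; 0 means the read was dispatched and the
 * completion callback will run when the backing I/O finishes.  This mirrors
 * the nfs_readpage_from_fscache() pattern; example_* names are hypothetical:
 *
 *	static void example_read_complete(struct page *page, void *ctx, int error)
 *	{
 *		if (!error)
 *			SetPageUptodate(page);
 *		unlock_page(page);
 *	}
 *
 *	static int example_readpage(struct file *file, struct page *page)
 *	{
 *		struct fscache_cookie *cookie = example_cookie(page);
 *		int ret;
 *
 *		ret = fscache_read_or_alloc_page(cookie, page,
 *						 example_read_complete,
 *						 NULL, GFP_KERNEL);
 *		if (ret == 0)
 *			return 0;
 *		if (ret == -ENOBUFS || ret == -ENODATA)
 *			return example_readpage_from_server(file, page);
 *		return ret;
 *	}
 */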

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM    - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS   - no backing object or space available in which to cache any
 *                pages not being read
 *   -ENODATA   - no data available in the backing object for some or all of
 *                the pages
 *   0          - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned *nr_pages,
                                  fscache_rw_complete_t end_io_func,
                                  void *context,
                                  gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,,%d,,,", cookie, *nr_pages);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(*nr_pages, >, 0);
        ASSERT(!list_empty(pages));

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;
        op->n_pages = *nr_pages;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_pages);
                ret = object->cache->ops->allocate_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_pages);
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_pages);
                ret = object->cache->ops->read_or_alloc_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
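
/*
 * Example (illustrative sketch): from ->readpages(), any pages the cache
 * takes are unlinked from the list and *nr_pages is decreased; whatever is
 * left must still be fetched from the server.  example_* names are
 * hypothetical:
 *
 *	static int example_readpages(struct file *file,
 *				     struct address_space *mapping,
 *				     struct list_head *pages, unsigned nr_pages)
 *	{
 *		struct fscache_cookie *cookie = example_cookie_of(mapping);
 *		int ret;
 *
 *		ret = fscache_read_or_alloc_pages(cookie, mapping, pages,
 *						  &nr_pages,
 *						  example_read_complete,
 *						  NULL, GFP_KERNEL);
 *		if (ret == 0 && nr_pages == 0)
 *			return 0;
 *
 *		return example_readpages_from_server(file, mapping,
 *						     pages, nr_pages);
 *	}
 */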

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM    - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS   - no backing object available in which to cache the block
 *   0          - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_allocs);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;
        op->n_pages = 1;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_alloc_ops);

        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_alloc_op_waits),
                __fscache_stat(&fscache_n_allocs_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        fscache_stat(&fscache_n_cop_allocate_page);
        ret = object->cache->ops->allocate_page(op, page, gfp);
        fscache_stat_d(&fscache_n_cop_allocate_page);

error:
        if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_allocs_intr);
        else if (ret < 0)
                fscache_stat(&fscache_n_allocs_nobufs);
        else
                fscache_stat(&fscache_n_allocs_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
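
/*
 * Example (illustrative sketch): per the netfs API documentation, allocation
 * is useful when no data can be expected to be in the cache for a page - say
 * the file has just been extended - so a read would be pointless but the
 * page should still get backing space ahead of a store.  A hypothetical use,
 * via the fscache_alloc_page() wrapper:
 *
 *	if (fscache_alloc_page(cookie, page, GFP_KERNEL) == 0)
 *		fscache_write_page(cookie, page, GFP_KERNEL);
 */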

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
        _enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
        struct fscache_cookie *cookie;
        struct page *page;
        unsigned n;
        void *results[1];
        int ret;

        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

        spin_lock(&object->lock);
        cookie = object->cookie;

        if (!fscache_object_is_active(object) || !cookie) {
                spin_unlock(&object->lock);
                _leave("");
                return;
        }

        spin_lock(&cookie->stores_lock);

        fscache_stat(&fscache_n_store_calls);

        /* find a page to store */
        page = NULL;
        n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
                                       FSCACHE_COOKIE_PENDING_TAG);
        if (n != 1)
                goto superseded;
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
        if (page->index > op->store_limit) {
                fscache_stat(&fscache_n_store_pages_over_limit);
                goto superseded;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_STORING_TAG);
        radix_tree_tag_clear(&cookie->stores, page->index,
                             FSCACHE_COOKIE_PENDING_TAG);

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        fscache_stat(&fscache_n_store_pages);
        fscache_stat(&fscache_n_cop_write_page);
        ret = object->cache->ops->write_page(op, page);
        fscache_stat_d(&fscache_n_cop_write_page);
        fscache_end_page_write(object, page);
        if (ret < 0) {
                fscache_abort_object(object);
                fscache_op_complete(&op->op);
        } else {
                fscache_enqueue_operation(&op->op);
        }

        _leave("");
        return;

superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
        spin_unlock(&cookie->stores_lock);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
        fscache_op_complete(&op->op);
        _leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM    - out of memory, nothing done
 *   -ENOBUFS   - no backing object available in which to cache the page
 *   0          - queued for storage - the page will be written to the cache
 *                in the background
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *      (a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *          fill op)
 *
 *      (b) writes deferred till post-creation (mark page for writing and
 *          return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *      (a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *      (b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_storage *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%x,", cookie, (u32) page->flags);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERT(PageFsCache(page));

        fscache_stat(&fscache_n_stores);

        op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!op)
                goto nomem;

        fscache_operation_init(&op->op, fscache_write_op,
                               fscache_release_write_op);
        op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);

        ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
                goto nomem_free;

        ret = -ENOBUFS;
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto nobufs;

        /* add the page to the pending-storage radix tree on the backing
         * object */
        spin_lock(&object->lock);
        spin_lock(&cookie->stores_lock);

        _debug("store limit %llx", (unsigned long long) object->store_limit);

        ret = radix_tree_insert(&cookie->stores, page->index, page);
        if (ret < 0) {
                if (ret == -EEXIST)
                        goto already_queued;
                _debug("insert failed %d", ret);
                goto nobufs_unlock_obj;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_PENDING_TAG);
        page_cache_get(page);

        /* we only want one writer at a time, but we do need to queue new
         * writers after exclusive ops */
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;

        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;

        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_stat(&fscache_n_store_ops);
        fscache_stat(&fscache_n_stores_ok);

        /* the work queue now carries its own ref on the object */
        fscache_put_operation(&op->op);
        _leave(" = 0");
        return 0;

already_queued:
        fscache_stat(&fscache_n_stores_again);
already_pending:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;

submit_failed:
        spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;

nobufs_unlock_obj:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;

nomem_free:
        kfree(op);
nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
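
/*
 * Example (illustrative sketch): a netfs typically writes a page to the
 * cache once the page has been filled from the server (the retrieval op
 * will already have marked it with PG_fscache).  If the store can't even be
 * queued, the page must be uncached again so that reclaim doesn't wait on a
 * write that will never happen - this mirrors nfs_readpage_to_fscache();
 * the helper name is hypothetical:
 *
 *	static void example_store_to_cache(struct fscache_cookie *cookie,
 *					   struct page *page)
 *	{
 *		if (fscache_write_page(cookie, page, GFP_KERNEL) != 0)
 *			fscache_uncache_page(cookie, page);
 *	}
 */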

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
        struct fscache_object *object;

        _enter(",%p", page);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        fscache_stat(&fscache_n_uncaches);

        /* cache withdrawal may beat us to it */
        if (!PageFsCache(page))
                goto done;

        /* get the object */
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects)) {
                ClearPageFsCache(page);
                goto done_unlock;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        /* there might now be stuff on disk we could read */
        clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* only invoke the cache backend if we managed to mark the page
         * uncached here; this deals with synchronisation vs withdrawal */
        if (TestClearPageFsCache(page) &&
            object->cache->ops->uncache_page) {
                /* the cache backend releases the cookie lock */
                fscache_stat(&fscache_n_cop_uncache_page);
                object->cache->ops->uncache_page(object, page);
                fscache_stat_d(&fscache_n_cop_uncache_page);
                goto done;
        }

done_unlock:
        spin_unlock(&cookie->lock);
done:
        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
        struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
        atomic_inc(&fscache_n_marks);
#endif

        _debug("- mark %p{%lx}", page, page->index);
        if (TestSetPageFsCache(page)) {
                static bool once_only;
                if (!once_only) {
                        once_only = true;
                        printk(KERN_WARNING "FS-Cache:"
                               " Cookie type %s marked page %lx"
                               " multiple times\n",
                               cookie->def->name, page->index);
                }
        }

        if (cookie->def->mark_page_cached)
                cookie->def->mark_page_cached(cookie->netfs_data,
                                              op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
                               struct pagevec *pagevec)
{
        unsigned long loop;

        for (loop = 0; loop < pagevec->nr; loop++)
                fscache_mark_page_cached(op, pagevec->pages[loop]);

        pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);
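
/*
 * Example (illustrative sketch): unlike most of this file, these two
 * functions are called by cache backends rather than by netfs code.  A
 * backend's ->read_or_alloc_page() implementation marks each page it
 * accepts before starting the backing I/O, roughly:
 *
 *	static int example_backend_read_or_alloc_page(
 *		struct fscache_retrieval *op, struct page *page, gfp_t gfp)
 *	{
 *		fscache_mark_page_cached(op, page);
 *		return example_backend_start_read(op, page, gfp);
 *	}
 *
 * (see cachefiles_read_or_alloc_page() in fs/cachefiles/rdwr.c for the
 * in-tree equivalent)
 */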

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
                                       struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct pagevec pvec;
        pgoff_t next;
        int i;

        _enter("%p,%p", cookie, inode);

        if (!mapping || mapping->nrpages == 0) {
                _leave(" [no pages]");
                return;
        }

        pagevec_init(&pvec, 0);
        next = 0;
        do {
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
                        break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        next = page->index;
                        if (PageFsCache(page)) {
                                __fscache_wait_on_page_write(cookie, page);
                                __fscache_uncache_page(cookie, page);
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        } while (++next);

        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
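
/*
 * Example (illustrative sketch): a netfs calls this through the
 * fscache_uncache_all_inode_pages() wrapper when tearing down an inode,
 * before relinquishing the cookie - e.g. from ->evict_inode().  The
 * example_* names are hypothetical:
 *
 *	static void example_evict_inode(struct inode *inode)
 *	{
 *		struct example_inode_info *ei = EXAMPLE_I(inode);
 *
 *		truncate_inode_pages(&inode->i_data, 0);
 *		fscache_uncache_all_inode_pages(ei->fscache, inode);
 *		fscache_relinquish_cookie(ei->fscache, 0);
 *	}
 */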