/* fs/fscache/page.c - Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
        void *val;

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        rcu_read_unlock();

        return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

        wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
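
/*
 * Illustrative sketch (not part of this file): a netfs normally reaches the
 * two helpers above through the fscache_wait_on_page_write() and
 * fscache_uncache_page() wrappers, typically from its ->invalidatepage()
 * handler, so that a page is not freed while the cache is still writing it
 * out.  my_cookie() is a hypothetical accessor for the inode's cookie.
 *
 *      static void my_invalidate_fscache_page(struct inode *inode,
 *                                             struct page *page)
 *      {
 *              struct fscache_cookie *cookie = my_cookie(inode);
 *
 *              if (PageFsCache(page)) {
 *                      fscache_wait_on_page_write(cookie, page);
 *                      fscache_uncache_page(cookie, page);
 *              }
 *      }
 */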

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct page *xpage;
        void *val;

        _enter("%p,%p,%x", cookie, page, gfp);

try_again:
        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        if (!val) {
                rcu_read_unlock();
                fscache_stat(&fscache_n_store_vmscan_not_storing);
                __fscache_uncache_page(cookie, page);
                return true;
        }

        /* see if the page is actually undergoing storage - if so we can't get
         * rid of it till the cache has finished with it */
        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                rcu_read_unlock();
                goto page_busy;
        }

        /* the page is pending storage, so we attempt to cancel the store and
         * discard the store request so that the page can be reclaimed */
        spin_lock(&cookie->stores_lock);
        rcu_read_unlock();

        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                /* the page started to undergo storage whilst we were looking,
                 * so now we can only wait or return */
                spin_unlock(&cookie->stores_lock);
                goto page_busy;
        }

        xpage = radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);

        if (xpage) {
                fscache_stat(&fscache_n_store_vmscan_cancelled);
                fscache_stat(&fscache_n_store_radix_deletes);
                ASSERTCMP(xpage, ==, page);
        } else {
                fscache_stat(&fscache_n_store_vmscan_gone);
        }

        wake_up_bit(&cookie->flags, 0);
        if (xpage)
                page_cache_release(xpage);
        __fscache_uncache_page(cookie, page);
        return true;

page_busy:
        /* We will wait here if we're allowed to, but that could deadlock the
         * allocator as the work threads writing to the cache may all end up
         * sleeping on memory allocation, so we may need to impose a timeout
         * too. */
        if (!(gfp & __GFP_WAIT)) {
                fscache_stat(&fscache_n_store_vmscan_busy);
                return false;
        }

        fscache_stat(&fscache_n_store_vmscan_wait);
        __fscache_wait_on_page_write(cookie, page);
        gfp &= ~__GFP_WAIT;
        goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
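
/*
 * Illustrative sketch (not part of this file): a netfs would normally reach
 * the function above through the fscache_maybe_release_page() wrapper from
 * its ->releasepage() handler.  Returning 0 tells the VM that the page
 * cannot be released yet because the cache still holds it.  my_cookie() is
 * a hypothetical accessor for the inode's cookie.
 *
 *      static int my_releasepage(struct page *page, gfp_t gfp)
 *      {
 *              struct inode *inode = page->mapping->host;
 *
 *              if (PageFsCache(page) &&
 *                  !fscache_maybe_release_page(my_cookie(inode), page, gfp))
 *                      return 0;
 *
 *              return 1;
 *      }
 */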

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
                                   struct page *page)
{
        struct fscache_cookie *cookie;
        struct page *xpage = NULL;

        spin_lock(&object->lock);
        cookie = object->cookie;
        if (cookie) {
                /* delete the page from the tree if it is now no longer
                 * pending */
                spin_lock(&cookie->stores_lock);
                radix_tree_tag_clear(&cookie->stores, page->index,
                                     FSCACHE_COOKIE_STORING_TAG);
                if (!radix_tree_tag_get(&cookie->stores, page->index,
                                        FSCACHE_COOKIE_PENDING_TAG)) {
                        fscache_stat(&fscache_n_store_radix_deletes);
                        xpage = radix_tree_delete(&cookie->stores, page->index);
                }
                spin_unlock(&cookie->stores_lock);
                wake_up_bit(&cookie->flags, 0);
        }
        spin_unlock(&object->lock);
        if (xpage)
                page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
        struct fscache_object *object = op->object;
        int ret;

        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

        fscache_stat(&fscache_n_attr_changed_calls);

        if (fscache_object_is_active(object)) {
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
                if (ret < 0)
                        fscache_abort_object(object);
        }

        fscache_op_complete(op);
        _leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;

        _enter("%p", cookie);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

        fscache_stat(&fscache_n_attr_changed);

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                fscache_stat(&fscache_n_attr_changed_nomem);
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        fscache_operation_init(op, fscache_attr_changed_op, NULL);
        op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;

nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
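
/*
 * Illustrative sketch: a netfs would typically call the function above via
 * the fscache_attr_changed() wrapper after changing an inode's attributes
 * (such as the file size), e.g. at the end of its ->setattr() path, so that
 * the cache can adjust the backing object.  my_cookie() and
 * my_post_setattr() are hypothetical names.
 *
 *      static void my_post_setattr(struct inode *inode)
 *      {
 *              if (fscache_attr_changed(my_cookie(inode)) < 0)
 *                      pr_debug("cache attribute update failed\n");
 *      }
 */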

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        _enter("{OP%x}", op->op.debug_id);

        ASSERTCMP(op->n_pages, ==, 0);

        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
                fscache_put_context(op->op.object->cookie, op->context);

        _leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
        struct address_space *mapping,
        fscache_rw_complete_t end_io_func,
        void *context)
{
        struct fscache_retrieval *op;

        /* allocate a retrieval operation and attempt to submit it */
        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op) {
                fscache_stat(&fscache_n_retrievals_nomem);
                return NULL;
        }

        fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
        op->op.flags    = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
        op->mapping     = mapping;
        op->end_io_func = end_io_func;
        op->context     = context;
        op->start_time  = jiffies;
        INIT_LIST_HEAD(&op->to_do);
        return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
        unsigned long jif;

        _enter("");

        if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
                _leave(" = 0 [imm]");
                return 0;
        }

        fscache_stat(&fscache_n_retrievals_wait);

        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                fscache_stat(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
                return -ERESTARTSYS;
        }

        ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

        smp_rmb();
        fscache_hist(fscache_retrieval_delay_histogram, jif);
        _leave(" = 0 [dly]");
        return 0;
}

/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
                                                 struct fscache_retrieval *op,
                                                 atomic_t *stat_op_waits,
                                                 atomic_t *stat_object_dead)
{
        int ret;

        if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
                goto check_if_dead;

        _debug(">>> WT");
        fscache_stat(stat_op_waits);
        if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                ret = fscache_cancel_op(&op->op);
                if (ret == 0)
                        return -ERESTARTSYS;

                /* it's been removed from the pending queue by another party,
                 * so we should get to run shortly */
                wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
        }
        _debug("<<< GO");

check_if_dead:
        if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
                fscache_stat(stat_object_dead);
                _leave(" = -ENOBUFS [cancelled]");
                return -ENOBUFS;
        }
        if (unlikely(fscache_object_is_dead(object))) {
                pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
                fscache_cancel_op(&op->op);
                fscache_stat(stat_object_dead);
                return -ENOBUFS;
        }
        return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM    - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS   - no backing object available in which to cache the block
 *   -ENODATA   - no data available in the backing object for this block
 *   0          - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                                 struct page *page,
                                 fscache_rw_complete_t end_io_func,
                                 void *context,
                                 gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
        op->n_pages = 1;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_page);
                ret = object->cache->ops->allocate_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_page);
                if (ret == 0)
                        ret = -ENODATA;
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_page);
                ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
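
/*
 * Illustrative sketch: a netfs ->readpage() implementation might try the
 * cache first through the fscache_read_or_alloc_page() wrapper; 0 means a
 * cache read was dispatched and the completion handler will run later,
 * while -ENOBUFS and -ENODATA mean the data must be fetched from the
 * server.  my_cookie(), my_read_complete() and my_readpage_from_server()
 * are hypothetical names, not part of the FS-Cache API.
 *
 *      static void my_read_complete(struct page *page, void *context,
 *                                   int error)
 *      {
 *              if (!error)
 *                      SetPageUptodate(page);
 *              unlock_page(page);
 *      }
 *
 *      static int my_readpage(struct inode *inode, struct page *page)
 *      {
 *              int ret = fscache_read_or_alloc_page(my_cookie(inode), page,
 *                                                   my_read_complete, NULL,
 *                                                   GFP_KERNEL);
 *              if (ret == -ENOBUFS || ret == -ENODATA)
 *                      ret = my_readpage_from_server(inode, page);
 *              return ret;
 *      }
 */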

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM    - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS   - no backing object or space available in which to cache any
 *                pages not being read
 *   -ENODATA   - no data available in the backing object for some or all of
 *                the pages
 *   0          - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned *nr_pages,
                                  fscache_rw_complete_t end_io_func,
                                  void *context,
                                  gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,,%d,,,", cookie, *nr_pages);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(*nr_pages, >, 0);
        ASSERT(!list_empty(pages));

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;
        op->n_pages = *nr_pages;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_pages);
                ret = object->cache->ops->allocate_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_pages);
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_pages);
                ret = object->cache->ops->read_or_alloc_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
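
/*
 * Illustrative sketch: the multi-page variant above typically backs a netfs
 * ->readpages() implementation.  Pages for which a cache read is dispatched
 * are removed from *pages and subtracted from *nr_pages; whatever remains
 * must be read from the server.  my_cookie(), my_read_complete() and
 * my_readpages_from_server() are hypothetical names.
 *
 *      static int my_readpages(struct inode *inode,
 *                              struct address_space *mapping,
 *                              struct list_head *pages, unsigned nr_pages)
 *      {
 *              int ret = fscache_read_or_alloc_pages(my_cookie(inode),
 *                                                    mapping, pages,
 *                                                    &nr_pages,
 *                                                    my_read_complete, NULL,
 *                                                    mapping_gfp_mask(mapping));
 *              if (ret == 0)
 *                      return 0;
 *              if (ret == -ENOBUFS || ret == -ENODATA)
 *                      return my_readpages_from_server(inode, mapping,
 *                                                      pages, nr_pages);
 *              return ret;
 *      }
 */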

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM    - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS   - no backing object available in which to cache the block
 *   0          - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_allocs);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;
        op->n_pages = 1;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_alloc_ops);

        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_alloc_op_waits),
                __fscache_stat(&fscache_n_allocs_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        fscache_stat(&fscache_n_cop_allocate_page);
        ret = object->cache->ops->allocate_page(op, page, gfp);
        fscache_stat_d(&fscache_n_cop_allocate_page);

error:
        if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_allocs_intr);
        else if (ret < 0)
                fscache_stat(&fscache_n_allocs_nobufs);
        else
                fscache_stat(&fscache_n_allocs_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
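
/*
 * Illustrative sketch: one possible (hypothetical) pattern is for a netfs
 * that already holds a page's data to reserve a block with the
 * fscache_alloc_page() wrapper and then push the data out with
 * fscache_write_page(), uncaching the page again on failure.  my_cookie()
 * is a hypothetical accessor for the inode's cookie.
 *
 *      if (fscache_alloc_page(my_cookie(inode), page, GFP_KERNEL) == 0) {
 *              if (fscache_write_page(my_cookie(inode), page, GFP_KERNEL) < 0)
 *                      fscache_uncache_page(my_cookie(inode), page);
 *      }
 */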

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
        _enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
        struct fscache_cookie *cookie;
        struct page *page;
        unsigned n;
        void *results[1];
        int ret;

        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

        spin_lock(&object->lock);
        cookie = object->cookie;

        if (!fscache_object_is_active(object) || !cookie) {
                spin_unlock(&object->lock);
                _leave("");
                return;
        }

        spin_lock(&cookie->stores_lock);

        fscache_stat(&fscache_n_store_calls);

        /* find a page to store */
        page = NULL;
        n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
                                       FSCACHE_COOKIE_PENDING_TAG);
        if (n != 1)
                goto superseded;
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
        if (page->index > op->store_limit) {
                fscache_stat(&fscache_n_store_pages_over_limit);
                goto superseded;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_STORING_TAG);
        radix_tree_tag_clear(&cookie->stores, page->index,
                             FSCACHE_COOKIE_PENDING_TAG);

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        fscache_stat(&fscache_n_store_pages);
        fscache_stat(&fscache_n_cop_write_page);
        ret = object->cache->ops->write_page(op, page);
        fscache_stat_d(&fscache_n_cop_write_page);
        fscache_end_page_write(object, page);
        if (ret < 0) {
                fscache_abort_object(object);
                fscache_op_complete(&op->op);
        } else {
                fscache_enqueue_operation(&op->op);
        }

        _leave("");
        return;

superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
        spin_unlock(&cookie->stores_lock);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
        fscache_op_complete(&op->op);
        _leave("");
}

/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
        struct page *page;
        void *results[16];
        int n, i;

        _enter("");

        while (spin_lock(&cookie->stores_lock),
               n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
                                              ARRAY_SIZE(results),
                                              FSCACHE_COOKIE_PENDING_TAG),
               n > 0) {
                for (i = n - 1; i >= 0; i--) {
                        page = results[i];
                        radix_tree_delete(&cookie->stores, page->index);
                }

                spin_unlock(&cookie->stores_lock);

                for (i = n - 1; i >= 0; i--)
                        page_cache_release(results[i]);
        }

        spin_unlock(&cookie->stores_lock);
        _leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM    - out of memory, nothing done
 *   -ENOBUFS   - no backing object available in which to cache the page
 *   0          - dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *      (a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *          fill op)
 *
 *      (b) writes deferred till post-creation (mark page for writing and
 *          return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *      (a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *      (b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_storage *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%x,", cookie, (u32) page->flags);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERT(PageFsCache(page));

        fscache_stat(&fscache_n_stores);

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!op)
                goto nomem;

        fscache_operation_init(&op->op, fscache_write_op,
                               fscache_release_write_op);
        op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);

        ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
                goto nomem_free;

        ret = -ENOBUFS;
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto nobufs;

        /* add the page to the pending-storage radix tree on the backing
         * object */
        spin_lock(&object->lock);
        spin_lock(&cookie->stores_lock);

        _debug("store limit %llx", (unsigned long long) object->store_limit);

        ret = radix_tree_insert(&cookie->stores, page->index, page);
        if (ret < 0) {
                if (ret == -EEXIST)
                        goto already_queued;
                _debug("insert failed %d", ret);
                goto nobufs_unlock_obj;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_PENDING_TAG);
        page_cache_get(page);

        /* we only want one writer at a time, but we do need to queue new
         * writers after exclusive ops */
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;

        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;

        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_stat(&fscache_n_store_ops);
        fscache_stat(&fscache_n_stores_ok);

        /* the work queue now carries its own ref on the object */
        fscache_put_operation(&op->op);
        _leave(" = 0");
        return 0;

already_queued:
        fscache_stat(&fscache_n_stores_again);
already_pending:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;

submit_failed:
        spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;

nobufs_unlock_obj:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;

nomem_free:
        kfree(op);
nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
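
/*
 * Illustrative sketch: once a netfs has a page uptodate (typically after
 * reading it from the server), it can hand the page to the function above
 * through the fscache_write_page() wrapper.  On failure the page must be
 * uncached again so that PG_fscache does not dangle.  my_cookie() and
 * my_write_to_cache() are hypothetical names.
 *
 *      static void my_write_to_cache(struct inode *inode, struct page *page)
 *      {
 *              int ret = fscache_write_page(my_cookie(inode), page,
 *                                           GFP_KERNEL);
 *
 *              if (ret != 0)
 *                      fscache_uncache_page(my_cookie(inode), page);
 *      }
 */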

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
        struct fscache_object *object;

        _enter(",%p", page);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        fscache_stat(&fscache_n_uncaches);

        /* cache withdrawal may beat us to it */
        if (!PageFsCache(page))
                goto done;

        /* get the object */
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects)) {
                ClearPageFsCache(page);
                goto done_unlock;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        /* there might now be stuff on disk we could read */
        clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* only invoke the cache backend if we managed to mark the page
         * uncached here; this deals with synchronisation vs withdrawal */
        if (TestClearPageFsCache(page) &&
            object->cache->ops->uncache_page) {
                /* the cache backend releases the cookie lock */
                fscache_stat(&fscache_n_cop_uncache_page);
                object->cache->ops->uncache_page(object, page);
                fscache_stat_d(&fscache_n_cop_uncache_page);
                goto done;
        }

done_unlock:
        spin_unlock(&cookie->lock);
done:
        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
        struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
        atomic_inc(&fscache_n_marks);
#endif

        _debug("- mark %p{%lx}", page, page->index);
        if (TestSetPageFsCache(page)) {
                static bool once_only;
                if (!once_only) {
                        once_only = true;
                        printk(KERN_WARNING "FS-Cache:"
                               " Cookie type %s marked page %lx"
                               " multiple times\n",
                               cookie->def->name, page->index);
                }
        }

        if (cookie->def->mark_page_cached)
                cookie->def->mark_page_cached(cookie->netfs_data,
                                              op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
                               struct pagevec *pagevec)
{
        unsigned long loop;

        for (loop = 0; loop < pagevec->nr; loop++)
                fscache_mark_page_cached(op, pagevec->pages[loop]);

        pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
                                       struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct pagevec pvec;
        pgoff_t next;
        int i;

        _enter("%p,%p", cookie, inode);

        if (!mapping || mapping->nrpages == 0) {
                _leave(" [no pages]");
                return;
        }

        pagevec_init(&pvec, 0);
        next = 0;
        do {
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
                        break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        next = page->index;
                        if (PageFsCache(page)) {
                                __fscache_wait_on_page_write(cookie, page);
                                __fscache_uncache_page(cookie, page);
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        } while (++next);

        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
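
/*
 * Illustrative sketch: a netfs might call the helper above (via the
 * fscache_uncache_all_inode_pages() wrapper) when it is about to give up an
 * inode's cookie, so that no PG_fscache pages outlive the cookie.
 * my_cookie() and my_put_cookie() are hypothetical names.
 *
 *      static void my_put_cookie(struct inode *inode)
 *      {
 *              fscache_uncache_all_inode_pages(my_cookie(inode), inode);
 *              fscache_relinquish_cookie(my_cookie(inode), 0);
 *      }
 */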